-rw-r--r--  tags/2.6.18-11/00000_README | 334
-rw-r--r--  tags/2.6.18-11/10001_xen-3.2.0.patch | 139942
-rw-r--r--  tags/2.6.18-11/30001_nfnetlink_log-null-deref.patch | 37
-rw-r--r--  tags/2.6.18-11/30002_nf_conntrack-set-nfctinfo.patch | 35
-rw-r--r--  tags/2.6.18-11/30003_netlink-infinite-recursion.patch | 65
-rw-r--r--  tags/2.6.18-11/30004_nl_fib_lookup-oops.patch | 34
-rw-r--r--  tags/2.6.18-11/30005_core-dump-unreadable-PT_INTERP.patch | 70
-rw-r--r--  tags/2.6.18-11/30006_appletalk-length-mismatch.patch | 93
-rw-r--r--  tags/2.6.18-11/30007_cm4040-buffer-overflow.patch | 44
-rw-r--r--  tags/2.6.18-11/30008_ipv6_fl_socklist-no-share.patch | 32
-rw-r--r--  tags/2.6.18-11/30009_keys-serial-num-collision.patch | 92
-rw-r--r--  tags/2.6.18-11/30010_ipv6_getsockopt_sticky-null-opt.patch | 42
-rw-r--r--  tags/2.6.18-11/30011_ipv6_setsockopt-NULL-deref.patch | 28
-rw-r--r--  tags/2.6.18-11/30012_ipv6-disallow-RH0-by-default.patch | 166
-rw-r--r--  tags/2.6.18-11/30013_listxattr-mem-corruption.patch | 441
-rw-r--r--  tags/2.6.18-11/30014_bluetooth-l2cap-hci-info-leaks.patch | 63
-rw-r--r--  tags/2.6.18-11/30015_usblcd-limit-memory-consumption.patch | 89
-rw-r--r--  tags/2.6.18-11/30016_pppoe-socket-release-mem-leak.patch | 42
-rw-r--r--  tags/2.6.18-11/30017_nf_conntrack_h323-bounds-checking.patch | 42
-rw-r--r--  tags/2.6.18-11/30018_dn_fib-out-of-bounds.patch | 37
-rw-r--r--  tags/2.6.18-11/30019_random-fix-seeding-with-zero-entropy.patch | 97
-rw-r--r--  tags/2.6.18-11/30020_random-fix-error-in-entropy-extraction.patch | 51
-rw-r--r--  tags/2.6.18-11/30021_nf_conntrack_sctp-null-deref.patch | 49
-rw-r--r--  tags/2.6.18-11/30022_i965-secure-batchbuffer.patch | 67
-rw-r--r--  tags/2.6.18-11/30023_appletalk-endianness-annotations.patch | 285
-rw-r--r--  tags/2.6.18-11/30024_drm-i965.patch | 221
-rw-r--r--  tags/2.6.18-11/30025_ipv4-fib_props-out-of-bounds.patch | 42
-rw-r--r--  tags/2.6.18-11/30026_cifs-fix-sign-settings.patch | 179
-rw-r--r--  tags/2.6.18-11/30027_cpuset_tasks-underflow.patch | 61
-rw-r--r--  tags/2.6.18-11/30028_random-bound-check-ordering.patch | 42
-rw-r--r--  tags/2.6.18-11/30030_aacraid-ioctl-perm-check.patch | 40
-rw-r--r--  tags/2.6.18-11/30031_ptrace-handle-bogus-selector.patch | 86
-rw-r--r--  tags/2.6.18-11/30032_fixup-trace_irq-breakage.patch | 64
-rw-r--r--  tags/2.6.18-11/30033_prevent-stack-growth-into-hugetlb-region.patch | 47
-rw-r--r--  tags/2.6.18-11/30034_cifs-honor-umask.patch | 81
-rw-r--r--  tags/2.6.18-11/30035_amd64-zero-extend-32bit-ptrace.patch | 88
-rw-r--r--  tags/2.6.18-11/30036_jffs2-ACL-vs-mode-handling.patch | 355
-rw-r--r--  tags/2.6.18-11/30039_hugetlb-prio_tree-unit-fix.patch | 85
-rw-r--r--  tags/2.6.18-11/30040_usb-pwc-disconnect-block.patch | 124
-rw-r--r--  tags/2.6.18-11/30041_ipv6-disallow-RH0-by-default-2.patch | 30
-rw-r--r--  tags/2.6.18-11/30042_reset-pdeathsig-on-suid-upstream.patch | 51
-rw-r--r--  tags/2.6.18-11/30044_cifs-better-failed-mount-errors.patch | 234
-rw-r--r--  tags/2.6.18-11/30045_cifs-corrupt-server-response-overflow.patch | 694
-rw-r--r--  tags/2.6.18-11/30046_wait_task_stopped-hang.patch | 38
-rw-r--r--  tags/2.6.18-11/30047_ieee80211-underflow.patch | 54
-rw-r--r--  tags/2.6.18-11/30048_sysfs_readdir-NULL-deref-1.patch | 112
-rw-r--r--  tags/2.6.18-11/30049_sysfs_readdir-NULL-deref-2.patch | 128
-rw-r--r--  tags/2.6.18-11/30050_sysfs-fix-condition-check.patch | 29
-rw-r--r--  tags/2.6.18-11/30051_tmpfs-restore-clear_highpage.patch | 44
-rw-r--r--  tags/2.6.18-11/30052_minixfs-printk-hang.patch | 76
-rw-r--r--  tags/2.6.18-11/30053_hrtimer-large-relative-timeouts-overflow.patch | 45
-rw-r--r--  tags/2.6.18-11/30054_coredump-only-to-same-uid.patch | 38
-rw-r--r--  tags/2.6.18-11/30055_isdn-net-overflow.patch | 54
-rw-r--r--  tags/2.6.18-11/30056_proc-snd-page-alloc-mem-leak.patch | 169
-rw-r--r--  tags/2.6.18-11/30057_fat-move-ioctl-compat-code.patch | 167
-rw-r--r--  tags/2.6.18-11/30058_fat-fix-compat-ioctls.patch | 311
-rw-r--r--  tags/2.6.18-11/30059_vfs-use-access-mode-flag.patch | 52
-rw-r--r--  tags/2.6.18-11/30060_i4l-isdn_ioctl-mem-overrun.patch | 56
-rw-r--r--  tags/2.6.18-11/30061_vmsplice-security.patch | 28
-rw-r--r--  tags/2.6.18-11/30062_clear-spurious-irq.patch | 34
-rw-r--r--  tags/2.6.18-11/30063_mmap-VM_DONTEXPAND.patch | 120
-rw-r--r--  tags/2.6.18-11/30064_RLIMIT_CPU-earlier-checking.patch | 80
-rw-r--r--  tags/2.6.18-11/30065_dnotify-race.patch | 22
-rw-r--r--  tags/2.6.18-11/30066_fcntl_setlk-close-race.patch | 76
-rw-r--r--  tags/2.6.18-11/30067_sit-missing-kfree_skb-on-pskb_may_pull.patch | 26
-rw-r--r--  tags/2.6.18-11/30068_hrtimer-prevent-overrun.patch | 38
-rw-r--r--  tags/2.6.18-11/30069_ktime-fix-MTIME_SEC_MAX-on-32-bit.patch | 29
-rw-r--r--  tags/2.6.18-11/30070_amd64-cs-corruption.patch | 12
-rw-r--r--  tags/2.6.18-11/30071_dccp-feature-length-check.patch | 15
-rw-r--r--  tags/2.6.18-11/30072_asn1-ber-decoding-checks.patch | 103
-rw-r--r--  tags/2.6.18-11/50009_gentooify-tls-warning.patch | 16
71 files changed, 146773 insertions, 0 deletions
diff --git a/tags/2.6.18-11/00000_README b/tags/2.6.18-11/00000_README
new file mode 100644
index 0000000..7ee89f7
--- /dev/null
+++ b/tags/2.6.18-11/00000_README
@@ -0,0 +1,334 @@
+Xen Patches README
+------------------
+
+These patches are intended to be stacked on top of genpatches-base.
+
+Many of the patches included here are swiped from various sources which
+use their own four-digit patch numbering schemes, so we are stuck with five
+digits to indicate the source for easier tracking and re-syncing.
+
+Numbering
+---------
+
+0xxxx Gentoo, not related to Xen. (in case we pull something from extras)
+1xxxx XenSource, upstream Xen patch for 2.6.18
+2xxxx Redhat, we use their Xen patch for >=2.6.20
+3xxxx Debian, we use their security fixes for 2.6.18
+5xxxx Gentoo, Xen and other fixes for Redhat and/or Debian patches.
+
+Patches
+-------
+
+10001_xen-3.2.0.patch
+ Upstream 3.2.0 patch
+
+30001_nfnetlink_log-null-deref.patch
+ [SECURITY] Fix remotely exploitable NULL pointer dereference in
+ nfulnl_recv_config()
+ See CVE-2007-1496
+
+30002_nf_conntrack-set-nfctinfo.patch
+ [SECURITY] Fix incorrect classification of IPv6 fragments as ESTABLISHED,
+ which allows remote attackers to bypass certain rulesets
+ See CVE-2007-1497
+
+30003_netlink-infinite-recursion.patch
+ [SECURITY] Fix infinite recursion bug in netlink
+ See CVE-2007-1861
+
+30004_nl_fib_lookup-oops.patch
+ Fix an oops bug introduced by the previous patch
+
+30005_core-dump-unreadable-PT_INTERP.patch
+ [SECURITY] Fix a vulnerability that allows local users to read
+ otherwise unreadable (but executable) files by triggering a core dump.
+ See CVE-2007-0958
+
+30006_appletalk-length-mismatch.patch
+ [SECURITY] Fix a remote DoS (crash) in appletalk
+ Depends upon bugfix/appletalk-endianness-annotations.patch
+ See CVE-2007-1357
+
+30007_cm4040-buffer-overflow.patch
+ [SECURITY] Fix a buffer overflow in the Omnikey CardMan 4040 driver
+ See CVE-2007-0005
+
+30008_ipv6_fl_socklist-no-share.patch
+ [SECURITY] Fix local DoS vulnerability caused by inadvertently sharing
+ ipv6_fl_socklist between the listening socket and the socket created
+ for a connection.
+ See CVE-2007-1592
+
+30009_keys-serial-num-collision.patch
+ [SECURITY] Fix the key serial number collision avoidance code in
+ key_alloc_serial() that could lead to a local DoS (oops).
+ (closes: #398470)
+ See CVE-2007-0006
+
+30010_ipv6_getsockopt_sticky-null-opt.patch
+ [SECURITY] Fix kernel memory leak vulnerability in
+ ipv6_getsockopt_sticky() which can be triggered by passing a len < 0.
+ See CVE-2007-1000
+
+30011_ipv6_setsockopt-NULL-deref.patch
+ [SECURITY] Fix NULL dereference in ipv6_setsockopt that could lead
+ to a local DoS (oops).
+ See CVE-2007-1388
+
+30012_ipv6-disallow-RH0-by-default.patch
+ [SECURITY] Avoid a remote DoS (network amplification between two routers)
+ by disabling type 0 IPv6 routing headers by default. Can be re-enabled via
+ a sysctl interface. Thanks to Vlad Yasevich for porting help.
+
+30013_listxattr-mem-corruption.patch
+ [SECURITY] Fix userspace corruption vulnerability caused by
+ incorrectly promoted return values in bad_inode_ops
+ This patch changes the kernel ABI.
+ See CVE-2006-5753
+
+30014_bluetooth-l2cap-hci-info-leaks.patch
+ [SECURITY] Fix information leaks in setsockopt() implementations
+ See CVE-2007-1353
+
+30015_usblcd-limit-memory-consumption.patch
+ [SECURITY] Limit memory consumption during write in the usblcd driver
+ See CVE-2007-3513
+
+30016_pppoe-socket-release-mem-leak.patch
+ [SECURITY] Fix an unprivileged memory leak when a PPPoE socket is released
+ after connect but before the PPPIOCGCHAN ioctl is called upon it
+ See CVE-2007-2525
+
+30017_nf_conntrack_h323-bounds-checking.patch
+ [SECURITY] nf_conntrack_h323: add out-of-range checking of choices'
+ index values
+ See CVE-2007-3642
+
+30018_dn_fib-out-of-bounds.patch
+ [SECURITY] Fix out of bounds condition in dn_fib_props[]
+ See CVE-2007-2172
+
+30019_random-fix-seeding-with-zero-entropy.patch,
+30020_random-fix-error-in-entropy-extraction.patch
+ [SECURITY] Avoid seeding with the same values at boot time when a
+ system has no entropy source and fix a casting error in entropy
+ extraction that resulted in slightly less random numbers.
+ See CVE-2007-2453
+
+30021_nf_conntrack_sctp-null-deref.patch
+ [SECURITY] Fix remotely triggerable NULL pointer dereference
+ by sending an unknown chunk type.
+ See CVE-2007-2876
+
+30022_i965-secure-batchbuffer.patch
+ [SECURITY] Fix i965 secured batchbuffer usage
+ See CVE-2007-3851
+
+30023_appletalk-endianness-annotations.patch
+ Dependency for 30006_appletalk-length-mismatch.patch.
+
+30024_drm-i965.patch
+ Dependency for 30022_i965-secure-batchbuffer.patch
+
+30025_ipv4-fib_props-out-of-bounds.patch
+ [SECURITY] Fix a typo which caused fib_props[] to be of the wrong size
+ and check for an out-of-bounds condition in the index provided by userspace
+ See CVE-2007-2172
+
+30026_cifs-fix-sign-settings.patch
+ [SECURITY] Fix overriding the server to force signing on caused by
+ checking the wrong global variable.
+ See CVE-2007-3843
+
+30027_cpuset_tasks-underflow.patch
+ [SECURITY] Fix integer underflow in /dev/cpuset/tasks which could allow
+ local attackers to read sensitive kernel memory if the cpuset filesystem
+ is mounted.
+ See CVE-2007-2875
+
+30028_random-bound-check-ordering.patch
+ [SECURITY] Fix stack-based buffer overflow in the random number
+ generator
+ See CVE-2007-3105
+
+30030_aacraid-ioctl-perm-check.patch
+ [SECURITY] Require admin capabilities to issue ioctls to aacraid devices
+ See CVE-2007-4308
+
+30031_ptrace-handle-bogus-selector.patch,
+30032_fixup-trace_irq-breakage.patch
+ [SECURITY] Handle an invalid LDT segment selector %cs (the xcs field)
+ during ptrace single-step operations that can be used to trigger a
+ NULL-pointer dereference causing an Oops.
+ See CVE-2007-3731
+
+30033_prevent-stack-growth-into-hugetlb-region.patch
+ [SECURITY] Prevent OOPS during stack expansion when the VMA crosses
+ into address space reserved for hugetlb pages.
+ See CVE-2007-3739
+
+30034_cifs-honor-umask.patch
+ [SECURITY] Make CIFS honor a process' umask
+ See CVE-2007-3740
+
+30035_amd64-zero-extend-32bit-ptrace.patch
+ [SECURITY] Zero extend all registers after ptrace in 32-bit entry path.
+ See CVE-2007-4573
+
+30036_jffs2-ACL-vs-mode-handling.patch
+ [SECURITY] Write correct legacy modes to the medium on inode creation to
+ prevent incorrect permissions upon remount.
+ See CVE-2007-4849
+
+30039_hugetlb-prio_tree-unit-fix.patch
+ [SECURITY] Fix misconversion of hugetlb_vmtruncate_list to prio_tree
+ which could be used to trigger a BUG_ON() call in exit_mmap.
+ See CVE-2007-4133
+
+30040_usb-pwc-disconnect-block.patch
+ [SECURITY] Fix an issue with unplugging webcams that use the pwc driver.
+ If userspace still has the device open when it is unplugged, the driver
+ would wait for the device to close, blocking the USB subsystem.
+ See CVE-2007-5093
+
+30041_ipv6-disallow-RH0-by-default-2.patch
+ Fix ipv6 rfc conformance issue introduced in 2.6.18.dfsg.1-13 by the
+ fix for CVE-2007-2242. Thanks to Brian Haley for the patch.
+ (closes: Debian #440127)
+
+/* This is already in Xen 3.2
+30042_reset-pdeathsig-on-suid-upstream.patch
+ Update fix for CVE-2007-3848 with the patch accepted upstream
+ (formerly 30013_reset-pdeathsig-on-suid.patch)
+*/
+
+30043_don-t-leak-nt-bit-into-next-task-xen.patch
+ [SECURITY] Don't leak NT bit into next task (Xen).
+ See CVE-2006-5755
+
+30044_cifs-better-failed-mount-errors.patch,
+30045_cifs-corrupt-server-response-overflow.patch
+ [SECURITY][CIFS] Fix multiple overflows that can be remotely triggered
+ by a server sending a corrupt response.
+ See CVE-2007-5904
+
+30046_wait_task_stopped-hang.patch
+ [SECURITY] wait_task_stopped was incorrectly testing for TASK_TRACED -
+ check p->exit_state instead avoiding a potential system hang
+ See CVE-2007-5500
+
+30047_ieee80211-underflow.patch
+ [SECURITY] Fix integer underflow in ieee80211 which makes it possible
+ for a malicious frame to crash a system using a driver built on top of
+ the Linux 802.11 wireless code.
+ See CVE-2007-4997
+
+30048_sysfs_readdir-NULL-deref-1.patch,
+30049_sysfs_readdir-NULL-deref-2.patch,
+30050_sysfs-fix-condition-check.patch
+ [SECURITY] Fix potential NULL pointer dereference which can lead to
+ a local DoS (kernel oops)
+ See CVE-2007-3104
+
+30051_tmpfs-restore-clear_highpage.patch
+ [SECURITY] Fix a theoretical kernel memory leak in the tmpfs filesystem
+ See CVE-2007-6417
+
+30052_minixfs-printk-hang.patch
+ [SECURITY] Rate-limit printks caused by accessing a corrupted minixfs
+ filesystem that would otherwise cause a system to hang (printk storm)
+ See CVE-2006-6058
+
+30053_hrtimer-large-relative-timeouts-overflow.patch
+ [SECURITY] Avoid overflow in hrtimers due to large relative timeouts
+ See CVE-2007-5966
+
+30054_coredump-only-to-same-uid.patch
+ [SECURITY] Fix an issue where core dumping over a file that
+ already exists retains the ownership of the original file
+ See CVE-2007-6206
+
+30055_isdn-net-overflow.patch
+ [SECURITY] Fix potential overflows in the ISDN subsystem
+ See CVE-2007-6063
+
+30056_proc-snd-page-alloc-mem-leak.patch
+ [SECURITY][ABI Changer] Fix an issue in the alsa subsystem that allows a
+ local user to read potentially sensitive kernel memory from the proc
+ filesystem
+ See CVE-2007-4571
+
+30057_fat-move-ioctl-compat-code.patch
+30058_fat-fix-compat-ioctls.patch
+ [SECURITY][ABI Changer] Fix kernel_dirent corruption in the compat layer
+ for fat ioctls
+ See CVE-2007-2878
+
+30059_vfs-use-access-mode-flag.patch
+ [SECURITY] Use the access mode flag instead of the open flag when
+ testing access mode for a directory. Modify
+ features/all/vserver/vs2.0.2.2-rc9.patch to apply on top of this
+ See CVE-2008-0001
+
+30060_i4l-isdn_ioctl-mem-overrun.patch
+ [SECURITY] Fix potential isdn ioctl memory overrun
+ See CVE-2007-6151
+
+30061_vmsplice-security.patch
+ [SECURITY] Fix missing access check in vmsplice.
+ See CVE-2008-0010, CVE-2008-0600
+
+30062_clear-spurious-irq.patch
+ Fix a minor denial of service issue that allows local users to disable
+ an interrupt by causing an interrupt handler to be quickly inserted/removed.
+ This has only been shown to happen with certain serial devices, so it can
+ only be triggered by a user who already has additional privileges (dialout
+ group). (closes: Debian #404815)
+
+30063_mmap-VM_DONTEXPAND.patch
+ [SECURITY] Add VM_DONTEXPAND to vm_flags in drivers that register
+ a fault handler but do not bounds check the offset argument
+ See CVE-2008-0007
+
+30064_RLIMIT_CPU-earlier-checking.patch
+ [SECURITY] Move check for an RLIMIT_CPU with a value of 0 earlier
+ to prevent a user escape (closes: #419706)
+ See CVE-2008-1294
+
+30065_dnotify-race.patch
+ [SECURITY] Fix a race in the directory notification (dnotify) code
+ See CVE-2008-1375
+
+30066_fcntl_setlk-close-race.patch
+ [SECURITY] Fix an SMP race to prevent reordering of flock updates
+ and accesses to the descriptor table on close().
+ See CVE-2008-1669
+
+30067_sit-missing-kfree_skb-on-pskb_may_pull.patch
+ [SECURITY] Fix remotely-triggerable memory leak in the Simple
+ Internet Transition (SIT) code used for IPv6 over IPv4 tunnels
+ See CVE-2008-2136
+
+30068_hrtimer-prevent-overrun.patch
+30069_ktime-fix-MTIME_SEC_MAX-on-32-bit.patch
+ [SECURITY] Fix potential infinite loop in hrtimer_forward on
+ 64-bit systems
+ See CVE-2007-6712
+
+30070_amd64-cs-corruption.patch
+ [SECURITY] Fix local ptrace denial of service for amd64 flavor
+ kernels, bug #480390
+ See CVE-2008-1615
+
+30071_dccp-feature-length-check.patch
+ [SECURITY] Validate feature length to avoid heap overflow
+ See CVE-2008-2358
+
+30072_asn1-ber-decoding-checks.patch
+ [SECURITY] Validate lengths in ASN.1 decoding code to avoid
+ heap overflow
+ See CVE-2008-1673
+
+
+50009_gentooify-tls-warning.patch
+ Change tls warning instructions to apply directly to Gentoo.
diff --git a/tags/2.6.18-11/10001_xen-3.2.0.patch b/tags/2.6.18-11/10001_xen-3.2.0.patch
new file mode 100644
index 0000000..d3d6c2b
--- /dev/null
+++ b/tags/2.6.18-11/10001_xen-3.2.0.patch
@@ -0,0 +1,139942 @@
+diff -rpuN linux-2.6.18.8/arch/i386/Kconfig linux-2.6.18-xen-3.2.0/arch/i386/Kconfig
+--- linux-2.6.18.8/arch/i386/Kconfig 2007-02-23 15:52:30.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/Kconfig 2008-02-15 16:21:49.000000000 -0800
+@@ -16,6 +16,7 @@ config X86_32
+
+ config GENERIC_TIME
+ bool
++ depends on !X86_XEN
+ default y
+
+ config LOCKDEP_SUPPORT
+@@ -103,6 +104,16 @@ config X86_PC
+ help
+ Choose this option if your computer is a standard PC or compatible.
+
++config X86_XEN
++ bool "Xen-compatible"
++ select XEN
++ select X86_UP_APIC if !SMP && XEN_PRIVILEGED_GUEST
++ select X86_UP_IOAPIC if !SMP && XEN_PRIVILEGED_GUEST
++ select SWIOTLB
++ help
++ Choose this option if you plan to run this kernel on top of the
++ Xen Hypervisor.
++
+ config X86_ELAN
+ bool "AMD Elan"
+ help
+@@ -213,6 +224,7 @@ source "arch/i386/Kconfig.cpu"
+
+ config HPET_TIMER
+ bool "HPET Timer Support"
++ depends on !X86_XEN
+ help
+ This enables the use of the HPET for the kernel's internal timer.
+ HPET is the next generation timer replacing legacy 8254s.
+@@ -263,7 +275,7 @@ source "kernel/Kconfig.preempt"
+
+ config X86_UP_APIC
+ bool "Local APIC support on uniprocessors"
+- depends on !SMP && !(X86_VISWS || X86_VOYAGER)
++ depends on !SMP && !(X86_VISWS || X86_VOYAGER || XEN_UNPRIVILEGED_GUEST)
+ help
+ A local APIC (Advanced Programmable Interrupt Controller) is an
+ integrated interrupt controller in the CPU. If you have a single-CPU
+@@ -288,12 +300,12 @@ config X86_UP_IOAPIC
+
+ config X86_LOCAL_APIC
+ bool
+- depends on X86_UP_APIC || ((X86_VISWS || SMP) && !X86_VOYAGER)
++ depends on X86_UP_APIC || ((X86_VISWS || SMP) && !(X86_VOYAGER || XEN_UNPRIVILEGED_GUEST))
+ default y
+
+ config X86_IO_APIC
+ bool
+- depends on X86_UP_IOAPIC || (SMP && !(X86_VISWS || X86_VOYAGER))
++ depends on X86_UP_IOAPIC || (SMP && !(X86_VISWS || X86_VOYAGER || XEN_UNPRIVILEGED_GUEST))
+ default y
+
+ config X86_VISWS_APIC
+@@ -303,7 +315,7 @@ config X86_VISWS_APIC
+
+ config X86_MCE
+ bool "Machine Check Exception"
+- depends on !X86_VOYAGER
++ depends on !(X86_VOYAGER || X86_XEN)
+ ---help---
+ Machine Check Exception support allows the processor to notify the
+ kernel if it detects a problem (e.g. overheating, component failure).
+@@ -402,6 +414,7 @@ config X86_REBOOTFIXUPS
+
+ config MICROCODE
+ tristate "/dev/cpu/microcode - Intel IA32 CPU microcode support"
++ depends on !XEN_UNPRIVILEGED_GUEST
+ ---help---
+ If you say Y here and also to "/dev file system support" in the
+ 'File systems' section, you will be able to update the microcode on
+@@ -434,6 +447,10 @@ config X86_CPUID
+ with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
+ /dev/cpu/31/cpuid.
+
++config SWIOTLB
++ bool
++ default n
++
+ source "drivers/firmware/Kconfig"
+
+ choice
+@@ -616,6 +633,7 @@ config HIGHPTE
+
+ config MATH_EMULATION
+ bool "Math emulation"
++ depends on !X86_XEN
+ ---help---
+ Linux can emulate a math coprocessor (used for floating point
+ operations) if you don't have one. 486DX and Pentium processors have
+@@ -641,6 +659,8 @@ config MATH_EMULATION
+
+ config MTRR
+ bool "MTRR (Memory Type Range Register) support"
++ depends on !XEN_UNPRIVILEGED_GUEST
++ default y if X86_XEN
+ ---help---
+ On Intel P6 family processors (Pentium Pro, Pentium II and later)
+ the Memory Type Range Registers (MTRRs) may be used to control
+@@ -675,7 +695,7 @@ config MTRR
+
+ config EFI
+ bool "Boot from EFI support"
+- depends on ACPI
++ depends on ACPI && !X86_XEN
+ default n
+ ---help---
+ This enables the the kernel to boot on EFI platforms using
+@@ -693,7 +713,7 @@ config EFI
+
+ config IRQBALANCE
+ bool "Enable kernel irq balancing"
+- depends on SMP && X86_IO_APIC
++ depends on SMP && X86_IO_APIC && !X86_XEN
+ default y
+ help
+ The default yes will allow the kernel to do irq load balancing.
+@@ -741,7 +761,7 @@ source kernel/Kconfig.hz
+
+ config KEXEC
+ bool "kexec system call (EXPERIMENTAL)"
+- depends on EXPERIMENTAL
++ depends on EXPERIMENTAL && !XEN_UNPRIVILEGED_GUEST
+ help
+ kexec is a system call that implements the ability to shutdown your
+ current kernel, and to start another kernel. It is like a reboot
+@@ -793,6 +813,7 @@ config HOTPLUG_CPU
+
+ config COMPAT_VDSO
+ bool "Compat VDSO support"
++ depends on !X86_XEN
+ default y
+ help
+ Map the VDSO to the predictable old-style address too.
+@@ -810,18 +831,18 @@ config ARCH_ENABLE_MEMORY_HOTPLUG
+ depends on HIGHMEM
+
+ menu "Power management options (ACPI, APM)"
+- depends on !X86_VOYAGER
++ depends on !(X86_VOYAGER || XEN_UNPRIVILEGED_GUEST)
+
+-source kernel/power/Kconfig
++source "kernel/power/Kconfig"
+
+ source "drivers/acpi/Kconfig"
+
+ menu "APM (Advanced Power Management) BIOS Support"
+-depends on PM && !X86_VISWS
++depends on PM && !(X86_VISWS || X86_XEN)
+
+ config APM
+ tristate "APM (Advanced Power Management) BIOS support"
+- depends on PM
++ depends on PM && PM_LEGACY
+ ---help---
+ APM is a BIOS specification for saving power using several different
+ techniques. This is mostly useful for battery powered laptops with
+@@ -1006,6 +1027,7 @@ choice
+
+ config PCI_GOBIOS
+ bool "BIOS"
++ depends on !X86_XEN
+
+ config PCI_GOMMCONFIG
+ bool "MMConfig"
+@@ -1013,6 +1035,13 @@ config PCI_GOMMCONFIG
+ config PCI_GODIRECT
+ bool "Direct"
+
++config PCI_GOXEN_FE
++ bool "Xen PCI Frontend"
++ depends on X86_XEN
++ help
++ The PCI device frontend driver allows the kernel to import arbitrary
++ PCI devices from a PCI backend to support PCI driver domains.
++
+ config PCI_GOANY
+ bool "Any"
+
+@@ -1020,7 +1049,7 @@ endchoice
+
+ config PCI_BIOS
+ bool
+- depends on !X86_VISWS && PCI && (PCI_GOBIOS || PCI_GOANY)
++ depends on !(X86_VISWS || X86_XEN) && PCI && (PCI_GOBIOS || PCI_GOANY)
+ default y
+
+ config PCI_DIRECT
+@@ -1033,6 +1062,18 @@ config PCI_MMCONFIG
+ depends on PCI && ACPI && (PCI_GOMMCONFIG || PCI_GOANY)
+ default y
+
++config XEN_PCIDEV_FRONTEND
++ bool
++ depends on PCI && X86_XEN && (PCI_GOXEN_FE || PCI_GOANY)
++ default y
++
++config XEN_PCIDEV_FE_DEBUG
++ bool "Xen PCI Frontend Debugging"
++ depends on XEN_PCIDEV_FRONTEND
++ default n
++ help
++ Enables some debug statements within the PCI Frontend.
++
+ source "drivers/pci/pcie/Kconfig"
+
+ source "drivers/pci/Kconfig"
+@@ -1043,7 +1084,7 @@ config ISA_DMA_API
+
+ config ISA
+ bool "ISA support"
+- depends on !(X86_VOYAGER || X86_VISWS)
++ depends on !(X86_VOYAGER || X86_VISWS || X86_XEN)
+ help
+ Find out whether you have ISA slots on your motherboard. ISA is the
+ name of a bus system, i.e. the way the CPU talks to the other stuff
+@@ -1070,7 +1111,7 @@ config EISA
+ source "drivers/eisa/Kconfig"
+
+ config MCA
+- bool "MCA support" if !(X86_VISWS || X86_VOYAGER)
++ bool "MCA support" if !(X86_VISWS || X86_VOYAGER || X86_XEN)
+ default y if X86_VOYAGER
+ help
+ MicroChannel Architecture is found in some IBM PS/2 machines and
+@@ -1146,6 +1187,8 @@ source "security/Kconfig"
+
+ source "crypto/Kconfig"
+
++source "drivers/xen/Kconfig"
++
+ source "lib/Kconfig"
+
+ #
+@@ -1171,7 +1214,7 @@ config X86_SMP
+
+ config X86_HT
+ bool
+- depends on SMP && !(X86_VISWS || X86_VOYAGER)
++ depends on SMP && !(X86_VISWS || X86_VOYAGER || X86_XEN)
+ default y
+
+ config X86_BIOS_REBOOT
+@@ -1184,6 +1227,16 @@ config X86_TRAMPOLINE
+ depends on X86_SMP || (X86_VOYAGER && SMP)
+ default y
+
++config X86_NO_TSS
++ bool
++ depends on X86_XEN
++ default y
++
++config X86_NO_IDT
++ bool
++ depends on X86_XEN
++ default y
++
+ config KTIME_SCALAR
+ bool
+ default y
+diff -rpuN linux-2.6.18.8/arch/i386/Kconfig.cpu linux-2.6.18-xen-3.2.0/arch/i386/Kconfig.cpu
+--- linux-2.6.18.8/arch/i386/Kconfig.cpu 2007-02-23 15:52:30.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/Kconfig.cpu 2008-02-15 16:21:49.000000000 -0800
+@@ -252,7 +252,7 @@ config X86_PPRO_FENCE
+
+ config X86_F00F_BUG
+ bool
+- depends on M586MMX || M586TSC || M586 || M486 || M386
++ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !X86_NO_IDT
+ default y
+
+ config X86_WP_WORKS_OK
+@@ -312,5 +312,5 @@ config X86_OOSTORE
+
+ config X86_TSC
+ bool
+- depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MGEODEGX1 || MGEODE_LX) && !X86_NUMAQ
++ depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MGEODEGX1 || MGEODE_LX) && !X86_NUMAQ && !X86_XEN
+ default y
+diff -rpuN linux-2.6.18.8/arch/i386/Kconfig.debug linux-2.6.18-xen-3.2.0/arch/i386/Kconfig.debug
+--- linux-2.6.18.8/arch/i386/Kconfig.debug 2007-02-23 15:52:30.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/Kconfig.debug 2008-02-15 16:21:49.000000000 -0800
+@@ -79,6 +79,7 @@ config X86_MPPARSE
+ config DOUBLEFAULT
+ default y
+ bool "Enable doublefault exception handler" if EMBEDDED
++ depends on !X86_NO_TSS
+ help
+ This option allows trapping of rare doublefault exceptions that
+ would otherwise cause a system to silently reboot. Disabling this
+diff -rpuN linux-2.6.18.8/arch/i386/Makefile linux-2.6.18-xen-3.2.0/arch/i386/Makefile
+--- linux-2.6.18.8/arch/i386/Makefile 2007-02-23 15:52:30.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/Makefile 2008-02-15 16:21:49.000000000 -0800
+@@ -71,6 +71,10 @@ mcore-$(CONFIG_X86_BIGSMP) := mach-defau
+ mflags-$(CONFIG_X86_SUMMIT) := -Iinclude/asm-i386/mach-summit
+ mcore-$(CONFIG_X86_SUMMIT) := mach-default
+
++# Xen subarch support
++mflags-$(CONFIG_X86_XEN) := -Iinclude/asm-i386/mach-xen
++mcore-$(CONFIG_X86_XEN) := mach-xen
++
+ # generic subarchitecture
+ mflags-$(CONFIG_X86_GENERICARCH) := -Iinclude/asm-i386/mach-generic
+ mcore-$(CONFIG_X86_GENERICARCH) := mach-default
+@@ -102,9 +106,20 @@ AFLAGS += $(mflags-y)
+
+ boot := arch/i386/boot
+
+-PHONY += zImage bzImage compressed zlilo bzlilo \
++PHONY += zImage bzImage vmlinuz compressed zlilo bzlilo \
+ zdisk bzdisk fdimage fdimage144 fdimage288 isoimage install
+
++ifdef CONFIG_XEN
++CPPFLAGS := -D__XEN_INTERFACE_VERSION__=$(CONFIG_XEN_INTERFACE_VERSION) \
++ -Iinclude$(if $(KBUILD_SRC),2)/asm/mach-xen $(CPPFLAGS)
++all: vmlinuz
++
++# KBUILD_IMAGE specifies the target image being built
++KBUILD_IMAGE := $(boot)/vmlinuz
++
++vmlinuz: vmlinux
++ $(Q)$(MAKE) $(build)=$(boot) $(KBUILD_IMAGE)
++else
+ all: bzImage
+
+ # KBUILD_IMAGE specify target image being built
+@@ -124,6 +139,7 @@ zdisk bzdisk: vmlinux
+
+ fdimage fdimage144 fdimage288 isoimage: vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) $@
++endif
+
+ install:
+ $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install
+diff -rpuN linux-2.6.18.8/arch/i386/boot/Makefile linux-2.6.18-xen-3.2.0/arch/i386/boot/Makefile
+--- linux-2.6.18.8/arch/i386/boot/Makefile 2007-02-23 15:52:30.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/boot/Makefile 2008-02-15 16:21:49.000000000 -0800
+@@ -26,7 +26,7 @@ SVGA_MODE := -DSVGA_MODE=NORMAL_VGA
+ #RAMDISK := -DRAMDISK=512
+
+ targets := vmlinux.bin bootsect bootsect.o \
+- setup setup.o zImage bzImage
++ setup setup.o zImage bzImage vmlinuz vmlinux-stripped
+ subdir- := compressed
+
+ hostprogs-y := tools/build
+@@ -133,5 +133,13 @@ zlilo: $(BOOTIMAGE)
+ cp System.map $(INSTALL_PATH)/
+ if [ -x /sbin/lilo ]; then /sbin/lilo; else /etc/lilo/install; fi
+
++$(obj)/vmlinuz: $(obj)/vmlinux-stripped FORCE
++ $(call if_changed,gzip)
++ @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
++
++$(obj)/vmlinux-stripped: OBJCOPYFLAGS := -g --strip-unneeded
++$(obj)/vmlinux-stripped: vmlinux FORCE
++ $(call if_changed,objcopy)
++
+ install:
+ sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)"
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/Makefile linux-2.6.18-xen-3.2.0/arch/i386/kernel/Makefile
+--- linux-2.6.18.8/arch/i386/kernel/Makefile 2007-02-23 15:52:30.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/Makefile 2008-02-15 16:21:49.000000000 -0800
+@@ -43,6 +43,7 @@ obj-$(CONFIG_K8_NB) += k8.o
+ EXTRA_AFLAGS := -traditional
+
+ obj-$(CONFIG_SCx200) += scx200.o
++obj-$(CONFIG_XEN) += fixup.o
+
+ # vsyscall.o contains the vsyscall DSO images as __initdata.
+ # We must build both images before we can assemble it.
+@@ -80,5 +81,8 @@ $(obj)/vsyscall-syms.o: $(src)/vsyscall.
+ $(obj)/vsyscall-sysenter.o $(obj)/vsyscall-note.o FORCE
+ $(call if_changed,syscall)
+
++early_printk-y += ../../x86_64/kernel/early_printk.o
+ k8-y += ../../x86_64/kernel/k8.o
+
++disabled-obj-$(CONFIG_XEN) := i8259.o reboot.o smpboot.o trampoline.o
++%/head.o %/head.s: $(if $(CONFIG_XEN),EXTRA_AFLAGS,dummy) :=
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/acpi/Makefile linux-2.6.18-xen-3.2.0/arch/i386/kernel/acpi/Makefile
+--- linux-2.6.18.8/arch/i386/kernel/acpi/Makefile 2007-02-23 15:52:30.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/acpi/Makefile 2008-02-15 16:21:49.000000000 -0800
+@@ -6,3 +6,4 @@ ifneq ($(CONFIG_ACPI_PROCESSOR),)
+ obj-y += cstate.o processor.o
+ endif
+
++disabled-obj-$(CONFIG_XEN) := cstate.o wakeup.o
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/acpi/boot-xen.c linux-2.6.18-xen-3.2.0/arch/i386/kernel/acpi/boot-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/acpi/boot-xen.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/acpi/boot-xen.c 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,1168 @@
++/*
++ * boot.c - Architecture-Specific Low-Level ACPI Boot Support
++ *
++ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
++ * Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ */
++
++#include <linux/init.h>
++#include <linux/acpi.h>
++#include <linux/efi.h>
++#include <linux/module.h>
++#include <linux/dmi.h>
++#include <linux/irq.h>
++
++#include <asm/pgtable.h>
++#include <asm/io_apic.h>
++#include <asm/apic.h>
++#include <asm/io.h>
++#include <asm/mpspec.h>
++
++#ifdef CONFIG_X86_64
++
++extern void __init clustered_apic_check(void);
++
++extern int gsi_irq_sharing(int gsi);
++#include <asm/proto.h>
++
++static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; }
++
++
++#else /* X86 */
++
++#ifdef CONFIG_X86_LOCAL_APIC
++#include <mach_apic.h>
++#include <mach_mpparse.h>
++#endif /* CONFIG_X86_LOCAL_APIC */
++
++static inline int gsi_irq_sharing(int gsi) { return gsi; }
++
++#endif /* X86 */
++
++#define BAD_MADT_ENTRY(entry, end) ( \
++ (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
++ ((acpi_table_entry_header *)entry)->length < sizeof(*entry))
++
++#define PREFIX "ACPI: "
++
++int acpi_noirq __initdata; /* skip ACPI IRQ initialization */
++int acpi_pci_disabled __initdata; /* skip ACPI PCI scan and IRQ initialization */
++int acpi_ht __initdata = 1; /* enable HT */
++
++int acpi_lapic;
++int acpi_ioapic;
++int acpi_strict;
++EXPORT_SYMBOL(acpi_strict);
++
++acpi_interrupt_flags acpi_sci_flags __initdata;
++int acpi_sci_override_gsi __initdata;
++int acpi_skip_timer_override __initdata;
++
++#ifdef CONFIG_X86_LOCAL_APIC
++static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
++#endif
++
++#ifndef __HAVE_ARCH_CMPXCHG
++#warning ACPI uses CMPXCHG, i486 and later hardware
++#endif
++
++#define MAX_MADT_ENTRIES 256
++u8 x86_acpiid_to_apicid[MAX_MADT_ENTRIES] =
++ {[0 ... MAX_MADT_ENTRIES - 1] = 0xff };
++EXPORT_SYMBOL(x86_acpiid_to_apicid);
++
++/* --------------------------------------------------------------------------
++ Boot-time Configuration
++ -------------------------------------------------------------------------- */
++
++/*
++ * The default interrupt routing model is PIC (8259). This gets
++ * overriden if IOAPICs are enumerated (below).
++ */
++enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
++
++#if defined(CONFIG_X86_64) && !defined(CONFIG_XEN)
++
++/* rely on all ACPI tables being in the direct mapping */
++char *__acpi_map_table(unsigned long phys_addr, unsigned long size)
++{
++ if (!phys_addr || !size)
++ return NULL;
++
++ if (phys_addr+size <= (end_pfn_map << PAGE_SHIFT) + PAGE_SIZE)
++ return __va(phys_addr);
++
++ return NULL;
++}
++
++#else
++
++/*
++ * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
++ * to map the target physical address. The problem is that set_fixmap()
++ * provides a single page, and it is possible that the page is not
++ * sufficient.
++ * By using this area, we can map up to MAX_IO_APICS pages temporarily,
++ * i.e. until the next __va_range() call.
++ *
++ * Important Safety Note: The fixed I/O APIC page numbers are *subtracted*
++ * from the fixed base. That's why we start at FIX_IO_APIC_BASE_END and
++ * count idx down while incrementing the phys address.
++ */
++char *__acpi_map_table(unsigned long phys, unsigned long size)
++{
++ unsigned long base, offset, mapped_size;
++ int idx;
++
++#ifndef CONFIG_XEN
++ if (phys + size < 8 * 1024 * 1024)
++ return __va(phys);
++#endif
++
++ offset = phys & (PAGE_SIZE - 1);
++ mapped_size = PAGE_SIZE - offset;
++ set_fixmap(FIX_ACPI_END, phys);
++ base = fix_to_virt(FIX_ACPI_END);
++
++ /*
++ * Most cases can be covered by the below.
++ */
++ idx = FIX_ACPI_END;
++ while (mapped_size < size) {
++ if (--idx < FIX_ACPI_BEGIN)
++ return NULL; /* cannot handle this */
++ phys += PAGE_SIZE;
++ set_fixmap(idx, phys);
++ mapped_size += PAGE_SIZE;
++ }
++
++ return ((unsigned char *)base + offset);
++}
++#endif
++
++#ifdef CONFIG_PCI_MMCONFIG
++/* The physical address of the MMCONFIG aperture. Set from ACPI tables. */
++struct acpi_table_mcfg_config *pci_mmcfg_config;
++int pci_mmcfg_config_num;
++
++int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
++{
++ struct acpi_table_mcfg *mcfg;
++ unsigned long i;
++ int config_size;
++
++ if (!phys_addr || !size)
++ return -EINVAL;
++
++ mcfg = (struct acpi_table_mcfg *)__acpi_map_table(phys_addr, size);
++ if (!mcfg) {
++ printk(KERN_WARNING PREFIX "Unable to map MCFG\n");
++ return -ENODEV;
++ }
++
++ /* how many config structures do we have */
++ pci_mmcfg_config_num = 0;
++ i = size - sizeof(struct acpi_table_mcfg);
++ while (i >= sizeof(struct acpi_table_mcfg_config)) {
++ ++pci_mmcfg_config_num;
++ i -= sizeof(struct acpi_table_mcfg_config);
++ };
++ if (pci_mmcfg_config_num == 0) {
++ printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
++ return -ENODEV;
++ }
++
++ config_size = pci_mmcfg_config_num * sizeof(*pci_mmcfg_config);
++ pci_mmcfg_config = kmalloc(config_size, GFP_KERNEL);
++ if (!pci_mmcfg_config) {
++ printk(KERN_WARNING PREFIX
++ "No memory for MCFG config tables\n");
++ return -ENOMEM;
++ }
++
++ memcpy(pci_mmcfg_config, &mcfg->config, config_size);
++ for (i = 0; i < pci_mmcfg_config_num; ++i) {
++ if (mcfg->config[i].base_reserved) {
++ printk(KERN_ERR PREFIX
++ "MMCONFIG not in low 4GB of memory\n");
++ kfree(pci_mmcfg_config);
++ pci_mmcfg_config_num = 0;
++ return -ENODEV;
++ }
++ }
++
++ return 0;
++}
++#endif /* CONFIG_PCI_MMCONFIG */
++
++#ifdef CONFIG_X86_LOCAL_APIC
++static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
++{
++ struct acpi_table_madt *madt = NULL;
++
++ if (!phys_addr || !size || !cpu_has_apic)
++ return -EINVAL;
++
++ madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size);
++ if (!madt) {
++ printk(KERN_WARNING PREFIX "Unable to map MADT\n");
++ return -ENODEV;
++ }
++
++ if (madt->lapic_address) {
++ acpi_lapic_addr = (u64) madt->lapic_address;
++
++ printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
++ madt->lapic_address);
++ }
++
++ acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
++
++ return 0;
++}
++
++static int __init
++acpi_parse_lapic(acpi_table_entry_header * header, const unsigned long end)
++{
++ struct acpi_table_lapic *processor = NULL;
++
++ processor = (struct acpi_table_lapic *)header;
++
++ if (BAD_MADT_ENTRY(processor, end))
++ return -EINVAL;
++
++ acpi_table_print_madt_entry(header);
++
++ /* Record local apic id only when enabled */
++ if (processor->flags.enabled)
++ x86_acpiid_to_apicid[processor->acpi_id] = processor->id;
++
++ /*
++ * We need to register disabled CPU as well to permit
++ * counting disabled CPUs. This allows us to size
++ * cpus_possible_map more accurately, to permit
++ * to not preallocating memory for all NR_CPUS
++ * when we use CPU hotplug.
++ */
++ mp_register_lapic(processor->id, /* APIC ID */
++ processor->flags.enabled); /* Enabled? */
++
++ return 0;
++}
++
++static int __init
++acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
++ const unsigned long end)
++{
++ struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL;
++
++ lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr *)header;
++
++ if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
++ return -EINVAL;
++
++ acpi_lapic_addr = lapic_addr_ovr->address;
++
++ return 0;
++}
++
++static int __init
++acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
++{
++ struct acpi_table_lapic_nmi *lapic_nmi = NULL;
++
++ lapic_nmi = (struct acpi_table_lapic_nmi *)header;
++
++ if (BAD_MADT_ENTRY(lapic_nmi, end))
++ return -EINVAL;
++
++ acpi_table_print_madt_entry(header);
++
++ if (lapic_nmi->lint != 1)
++ printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
++
++ return 0;
++}
++
++#endif /*CONFIG_X86_LOCAL_APIC */
++
++#ifdef CONFIG_X86_IO_APIC
++
++static int __init
++acpi_parse_ioapic(acpi_table_entry_header * header, const unsigned long end)
++{
++ struct acpi_table_ioapic *ioapic = NULL;
++
++ ioapic = (struct acpi_table_ioapic *)header;
++
++ if (BAD_MADT_ENTRY(ioapic, end))
++ return -EINVAL;
++
++ acpi_table_print_madt_entry(header);
++
++ mp_register_ioapic(ioapic->id,
++ ioapic->address, ioapic->global_irq_base);
++
++ return 0;
++}
++
++/*
++ * Parse Interrupt Source Override for the ACPI SCI
++ */
++static void acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
++{
++ if (trigger == 0) /* compatible SCI trigger is level */
++ trigger = 3;
++
++ if (polarity == 0) /* compatible SCI polarity is low */
++ polarity = 3;
++
++ /* Command-line over-ride via acpi_sci= */
++ if (acpi_sci_flags.trigger)
++ trigger = acpi_sci_flags.trigger;
++
++ if (acpi_sci_flags.polarity)
++ polarity = acpi_sci_flags.polarity;
++
++ /*
++ * mp_config_acpi_legacy_irqs() already setup IRQs < 16
++ * If GSI is < 16, this will update its flags,
++ * else it will create a new mp_irqs[] entry.
++ */
++ mp_override_legacy_irq(gsi, polarity, trigger, gsi);
++
++ /*
++ * stash over-ride to indicate we've been here
++ * and for later update of acpi_fadt
++ */
++ acpi_sci_override_gsi = gsi;
++ return;
++}
++
++static int __init
++acpi_parse_int_src_ovr(acpi_table_entry_header * header,
++ const unsigned long end)
++{
++ struct acpi_table_int_src_ovr *intsrc = NULL;
++
++ intsrc = (struct acpi_table_int_src_ovr *)header;
++
++ if (BAD_MADT_ENTRY(intsrc, end))
++ return -EINVAL;
++
++ acpi_table_print_madt_entry(header);
++
++ if (intsrc->bus_irq == acpi_fadt.sci_int) {
++ acpi_sci_ioapic_setup(intsrc->global_irq,
++ intsrc->flags.polarity,
++ intsrc->flags.trigger);
++ return 0;
++ }
++
++ if (acpi_skip_timer_override &&
++ intsrc->bus_irq == 0 && intsrc->global_irq == 2) {
++ printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
++ return 0;
++ }
++
++ mp_override_legacy_irq(intsrc->bus_irq,
++ intsrc->flags.polarity,
++ intsrc->flags.trigger, intsrc->global_irq);
++
++ return 0;
++}
++
++static int __init
++acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)
++{
++ struct acpi_table_nmi_src *nmi_src = NULL;
++
++ nmi_src = (struct acpi_table_nmi_src *)header;
++
++ if (BAD_MADT_ENTRY(nmi_src, end))
++ return -EINVAL;
++
++ acpi_table_print_madt_entry(header);
++
++ /* TBD: Support nimsrc entries? */
++
++ return 0;
++}
++
++#endif /* CONFIG_X86_IO_APIC */
++
++/*
++ * acpi_pic_sci_set_trigger()
++ *
++ * use ELCR to set PIC-mode trigger type for SCI
++ *
++ * If a PIC-mode SCI is not recognized or gives spurious IRQ7's
++ * it may require Edge Trigger -- use "acpi_sci=edge"
++ *
++ * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers
++ * for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge.
++ * ECLR1 is IRQ's 0-7 (IRQ 0, 1, 2 must be 0)
++ * ECLR2 is IRQ's 8-15 (IRQ 8, 13 must be 0)
++ */
++
++void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
++{
++ unsigned int mask = 1 << irq;
++ unsigned int old, new;
++
++ /* Real old ELCR mask */
++ old = inb(0x4d0) | (inb(0x4d1) << 8);
++
++ /*
++ * If we use ACPI to set PCI irq's, then we should clear ELCR
++ * since we will set it correctly as we enable the PCI irq
++ * routing.
++ */
++ new = acpi_noirq ? old : 0;
++
++ /*
++ * Update SCI information in the ELCR, it isn't in the PCI
++ * routing tables..
++ */
++ switch (trigger) {
++ case 1: /* Edge - clear */
++ new &= ~mask;
++ break;
++ case 3: /* Level - set */
++ new |= mask;
++ break;
++ }
++
++ if (old == new)
++ return;
++
++ printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old);
++ outb(new, 0x4d0);
++ outb(new >> 8, 0x4d1);
++}
++
++int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
++{
++#ifdef CONFIG_X86_IO_APIC
++ if (use_pci_vector() && !platform_legacy_irq(gsi))
++ *irq = IO_APIC_VECTOR(gsi);
++ else
++#endif
++ *irq = gsi_irq_sharing(gsi);
++ return 0;
++}
++
++/*
++ * success: return IRQ number (>=0)
++ * failure: return < 0
++ */
++int acpi_register_gsi(u32 gsi, int triggering, int polarity)
++{
++ unsigned int irq;
++ unsigned int plat_gsi = gsi;
++
++#ifdef CONFIG_PCI
++ /*
++ * Make sure all (legacy) PCI IRQs are set as level-triggered.
++ */
++ if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
++ extern void eisa_set_level_irq(unsigned int irq);
++
++ if (triggering == ACPI_LEVEL_SENSITIVE)
++ eisa_set_level_irq(gsi);
++ }
++#endif
++
++#ifdef CONFIG_X86_IO_APIC
++ if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
++ plat_gsi = mp_register_gsi(gsi, triggering, polarity);
++ }
++#endif
++ acpi_gsi_to_irq(plat_gsi, &irq);
++ return irq;
++}
++
++EXPORT_SYMBOL(acpi_register_gsi);
++
++/*
++ * ACPI based hotplug support for CPU
++ */
++#ifdef CONFIG_ACPI_HOTPLUG_CPU
++int acpi_map_lsapic(acpi_handle handle, int *pcpu)
++{
++ /* TBD */
++ return -EINVAL;
++}
++
++EXPORT_SYMBOL(acpi_map_lsapic);
++
++int acpi_unmap_lsapic(int cpu)
++{
++ /* TBD */
++ return -EINVAL;
++}
++
++EXPORT_SYMBOL(acpi_unmap_lsapic);
++#endif /* CONFIG_ACPI_HOTPLUG_CPU */
++
++int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
++{
++ /* TBD */
++ return -EINVAL;
++}
++
++EXPORT_SYMBOL(acpi_register_ioapic);
++
++int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
++{
++ /* TBD */
++ return -EINVAL;
++}
++
++EXPORT_SYMBOL(acpi_unregister_ioapic);
++
++static unsigned long __init
++acpi_scan_rsdp(unsigned long start, unsigned long length)
++{
++ unsigned long offset = 0;
++ unsigned long sig_len = sizeof("RSD PTR ") - 1;
++ unsigned long vstart = (unsigned long)isa_bus_to_virt(start);
++
++ /*
++ * Scan all 16-byte boundaries of the physical memory region for the
++ * RSDP signature.
++ */
++ for (offset = 0; offset < length; offset += 16) {
++ if (strncmp((char *)(vstart + offset), "RSD PTR ", sig_len))
++ continue;
++ return (start + offset);
++ }
++
++ return 0;
++}
++
++static int __init acpi_parse_sbf(unsigned long phys_addr, unsigned long size)
++{
++ struct acpi_table_sbf *sb;
++
++ if (!phys_addr || !size)
++ return -EINVAL;
++
++ sb = (struct acpi_table_sbf *)__acpi_map_table(phys_addr, size);
++ if (!sb) {
++ printk(KERN_WARNING PREFIX "Unable to map SBF\n");
++ return -ENODEV;
++ }
++
++ sbf_port = sb->sbf_cmos; /* Save CMOS port */
++
++ return 0;
++}
++
++#ifdef CONFIG_HPET_TIMER
++
++static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
++{
++ struct acpi_table_hpet *hpet_tbl;
++
++ if (!phys || !size)
++ return -EINVAL;
++
++ hpet_tbl = (struct acpi_table_hpet *)__acpi_map_table(phys, size);
++ if (!hpet_tbl) {
++ printk(KERN_WARNING PREFIX "Unable to map HPET\n");
++ return -ENODEV;
++ }
++
++ if (hpet_tbl->addr.space_id != ACPI_SPACE_MEM) {
++ printk(KERN_WARNING PREFIX "HPET timers must be located in "
++ "memory.\n");
++ return -1;
++ }
++#ifdef CONFIG_X86_64
++ vxtime.hpet_address = hpet_tbl->addr.addrl |
++ ((long)hpet_tbl->addr.addrh << 32);
++
++ printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
++ hpet_tbl->id, vxtime.hpet_address);
++#else /* X86 */
++ {
++ extern unsigned long hpet_address;
++
++ hpet_address = hpet_tbl->addr.addrl;
++ printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
++ hpet_tbl->id, hpet_address);
++ }
++#endif /* X86 */
++
++ return 0;
++}
++#else
++#define acpi_parse_hpet NULL
++#endif
++
++#ifdef CONFIG_X86_PM_TIMER
++extern u32 pmtmr_ioport;
++#endif
++
++static int __init acpi_parse_fadt(unsigned long phys, unsigned long size)
++{
++ struct fadt_descriptor *fadt = NULL;
++
++ fadt = (struct fadt_descriptor *)__acpi_map_table(phys, size);
++ if (!fadt) {
++ printk(KERN_WARNING PREFIX "Unable to map FADT\n");
++ return 0;
++ }
++ /* initialize sci_int early for INT_SRC_OVR MADT parsing */
++ acpi_fadt.sci_int = fadt->sci_int;
++
++ /* initialize rev and apic_phys_dest_mode for x86_64 genapic */
++ acpi_fadt.revision = fadt->revision;
++ acpi_fadt.force_apic_physical_destination_mode =
++ fadt->force_apic_physical_destination_mode;
++
++#if defined(CONFIG_X86_PM_TIMER) && !defined(CONFIG_XEN)
++ /* detect the location of the ACPI PM Timer */
++ if (fadt->revision >= FADT2_REVISION_ID) {
++ /* FADT rev. 2 */
++ if (fadt->xpm_tmr_blk.address_space_id !=
++ ACPI_ADR_SPACE_SYSTEM_IO)
++ return 0;
++
++ pmtmr_ioport = fadt->xpm_tmr_blk.address;
++ /*
++ * "X" fields are optional extensions to the original V1.0
++ * fields, so we must selectively expand V1.0 fields if the
++ * corresponding X field is zero.
++ */
++ if (!pmtmr_ioport)
++ pmtmr_ioport = fadt->V1_pm_tmr_blk;
++ } else {
++ /* FADT rev. 1 */
++ pmtmr_ioport = fadt->V1_pm_tmr_blk;
++ }
++ if (pmtmr_ioport)
++ printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
++ pmtmr_ioport);
++#endif
++ return 0;
++}
++
++unsigned long __init acpi_find_rsdp(void)
++{
++ unsigned long rsdp_phys = 0;
++
++ if (efi_enabled) {
++ if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
++ return efi.acpi20;
++ else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
++ return efi.acpi;
++ }
++ /*
++ * Scan memory looking for the RSDP signature. First search EBDA (low
++ * memory) paragraphs and then search upper memory (E0000-FFFFF).
++ */
++ rsdp_phys = acpi_scan_rsdp(0, 0x400);
++ if (!rsdp_phys)
++ rsdp_phys = acpi_scan_rsdp(0xE0000, 0x20000);
++
++ return rsdp_phys;
++}
++
++#ifdef CONFIG_X86_LOCAL_APIC
++/*
++ * Parse LAPIC entries in MADT
++ * returns 0 on success, < 0 on error
++ */
++static int __init acpi_parse_madt_lapic_entries(void)
++{
++ int count;
++
++ if (!cpu_has_apic)
++ return -ENODEV;
++
++ /*
++ * Note that the LAPIC address is obtained from the MADT (32-bit value)
++ * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value).
++ */
++
++ count =
++ acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR,
++ acpi_parse_lapic_addr_ovr, 0);
++ if (count < 0) {
++ printk(KERN_ERR PREFIX
++ "Error parsing LAPIC address override entry\n");
++ return count;
++ }
++
++ mp_register_lapic_address(acpi_lapic_addr);
++
++ count = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic,
++ MAX_APICS);
++ if (!count) {
++ printk(KERN_ERR PREFIX "No LAPIC entries present\n");
++ /* TBD: Cleanup to allow fallback to MPS */
++ return -ENODEV;
++ } else if (count < 0) {
++ printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
++ /* TBD: Cleanup to allow fallback to MPS */
++ return count;
++ }
++
++ count =
++ acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0);
++ if (count < 0) {
++ printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
++ /* TBD: Cleanup to allow fallback to MPS */
++ return count;
++ }
++ return 0;
++}
++#endif /* CONFIG_X86_LOCAL_APIC */
++
++#ifdef CONFIG_X86_IO_APIC
++/*
++ * Parse IOAPIC related entries in MADT
++ * returns 0 on success, < 0 on error
++ */
++static int __init acpi_parse_madt_ioapic_entries(void)
++{
++ int count;
++
++ /*
++ * ACPI interpreter is required to complete interrupt setup,
++ * so if it is off, don't enumerate the io-apics with ACPI.
++ * If MPS is present, it will handle them,
++ * otherwise the system will stay in PIC mode
++ */
++ if (acpi_disabled || acpi_noirq) {
++ return -ENODEV;
++ }
++
++ if (!cpu_has_apic)
++ return -ENODEV;
++
++ /*
++ * if "noapic" boot option, don't look for IO-APICs
++ */
++ if (skip_ioapic_setup) {
++ printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
++ "due to 'noapic' option.\n");
++ return -ENODEV;
++ }
++
++ count =
++ acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic,
++ MAX_IO_APICS);
++ if (!count) {
++ printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
++ return -ENODEV;
++ } else if (count < 0) {
++ printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
++ return count;
++ }
++
++ count =
++ acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr,
++ NR_IRQ_VECTORS);
++ if (count < 0) {
++ printk(KERN_ERR PREFIX
++ "Error parsing interrupt source overrides entry\n");
++ /* TBD: Cleanup to allow fallback to MPS */
++ return count;
++ }
++
++ /*
++ * If BIOS did not supply an INT_SRC_OVR for the SCI
++ * pretend we got one so we can set the SCI flags.
++ */
++ if (!acpi_sci_override_gsi)
++ acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0);
++
++ /* Fill in identity legacy mapings where no override */
++ mp_config_acpi_legacy_irqs();
++
++ count =
++ acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src,
++ NR_IRQ_VECTORS);
++ if (count < 0) {
++ printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
++ /* TBD: Cleanup to allow fallback to MPS */
++ return count;
++ }
++
++ return 0;
++}
++#else
++static inline int acpi_parse_madt_ioapic_entries(void)
++{
++ return -1;
++}
++#endif /* !CONFIG_X86_IO_APIC */
++
++static void __init acpi_process_madt(void)
++{
++#ifdef CONFIG_X86_LOCAL_APIC
++ int count, error;
++
++ count = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
++ if (count >= 1) {
++
++ /*
++ * Parse MADT LAPIC entries
++ */
++ error = acpi_parse_madt_lapic_entries();
++ if (!error) {
++ acpi_lapic = 1;
++
++#ifdef CONFIG_X86_GENERICARCH
++ generic_bigsmp_probe();
++#endif
++ /*
++ * Parse MADT IO-APIC entries
++ */
++ error = acpi_parse_madt_ioapic_entries();
++ if (!error) {
++ acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
++ acpi_irq_balance_set(NULL);
++ acpi_ioapic = 1;
++
++ smp_found_config = 1;
++ clustered_apic_check();
++ }
++ }
++ if (error == -EINVAL) {
++ /*
++ * Dell Precision Workstation 410, 610 come here.
++ */
++ printk(KERN_ERR PREFIX
++ "Invalid BIOS MADT, disabling ACPI\n");
++ disable_acpi();
++ }
++ }
++#endif
++ return;
++}
++
++extern int acpi_force;
++
++#ifdef __i386__
++
++static int __init disable_acpi_irq(struct dmi_system_id *d)
++{
++ if (!acpi_force) {
++ printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n",
++ d->ident);
++ acpi_noirq_set();
++ }
++ return 0;
++}
++
++static int __init disable_acpi_pci(struct dmi_system_id *d)
++{
++ if (!acpi_force) {
++ printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n",
++ d->ident);
++ acpi_disable_pci();
++ }
++ return 0;
++}
++
++static int __init dmi_disable_acpi(struct dmi_system_id *d)
++{
++ if (!acpi_force) {
++ printk(KERN_NOTICE "%s detected: acpi off\n", d->ident);
++ disable_acpi();
++ } else {
++ printk(KERN_NOTICE
++ "Warning: DMI blacklist says broken, but acpi forced\n");
++ }
++ return 0;
++}
++
++/*
++ * Limit ACPI to CPU enumeration for HT
++ */
++static int __init force_acpi_ht(struct dmi_system_id *d)
++{
++ if (!acpi_force) {
++ printk(KERN_NOTICE "%s detected: force use of acpi=ht\n",
++ d->ident);
++ disable_acpi();
++ acpi_ht = 1;
++ } else {
++ printk(KERN_NOTICE
++ "Warning: acpi=force overrules DMI blacklist: acpi=ht\n");
++ }
++ return 0;
++}
++
++/*
++ * If your system is blacklisted here, but you find that acpi=force
++ * works for you, please contact acpi-devel@sourceforge.net
++ */
++static struct dmi_system_id __initdata acpi_dmi_table[] = {
++ /*
++ * Boxes that need ACPI disabled
++ */
++ {
++ .callback = dmi_disable_acpi,
++ .ident = "IBM Thinkpad",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
++ DMI_MATCH(DMI_BOARD_NAME, "2629H1G"),
++ },
++ },
++
++ /*
++ * Boxes that need acpi=ht
++ */
++ {
++ .callback = force_acpi_ht,
++ .ident = "FSC Primergy T850",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "DELL GX240",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Dell Computer Corporation"),
++ DMI_MATCH(DMI_BOARD_NAME, "OptiPlex GX240"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "HP VISUALIZE NT Workstation",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "Compaq Workstation W8000",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "ASUS P4B266",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "P4B266"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "ASUS P2B-DS",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "ASUS CUR-DLS",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "ABIT i440BX-W83977",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"),
++ DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "IBM Bladecenter",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
++ DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "IBM eServer xSeries 360",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
++ DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "IBM eserver xSeries 330",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
++ DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "IBM eserver xSeries 440",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"),
++ },
++ },
++
++ /*
++ * Boxes that need ACPI PCI IRQ routing disabled
++ */
++ {
++ .callback = disable_acpi_irq,
++ .ident = "ASUS A7V",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"),
++ DMI_MATCH(DMI_BOARD_NAME, "<A7V>"),
++ /* newer BIOS, Revision 1011, does work */
++ DMI_MATCH(DMI_BIOS_VERSION,
++ "ASUS A7V ACPI BIOS Revision 1007"),
++ },
++ },
++
++ /*
++ * Boxes that need ACPI PCI IRQ routing and PCI scan disabled
++ */
++ { /* _BBN 0 bug */
++ .callback = disable_acpi_pci,
++ .ident = "ASUS PR-DLS",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"),
++ DMI_MATCH(DMI_BIOS_VERSION,
++ "ASUS PR-DLS ACPI BIOS Revision 1010"),
++ DMI_MATCH(DMI_BIOS_DATE, "03/21/2003")
++ },
++ },
++ {
++ .callback = disable_acpi_pci,
++ .ident = "Acer TravelMate 36x Laptop",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
++ },
++ },
++ {}
++};
++
++#endif /* __i386__ */
++
++/*
++ * acpi_boot_table_init() and acpi_boot_init()
++ * called from setup_arch(), always.
++ * 1. checksums all tables
++ * 2. enumerates lapics
++ * 3. enumerates io-apics
++ *
++ * acpi_table_init() is separate to allow reading SRAT without
++ * other side effects.
++ *
++ * side effects of acpi_boot_init:
++ * acpi_lapic = 1 if LAPIC found
++ * acpi_ioapic = 1 if IOAPIC found
++ * if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
++ * if acpi_blacklisted() acpi_disabled = 1;
++ * acpi_irq_model=...
++ * ...
++ *
++ * return value: (currently ignored)
++ * 0: success
++ * !0: failure
++ */
++
++int __init acpi_boot_table_init(void)
++{
++ int error;
++
++#ifdef __i386__
++ dmi_check_system(acpi_dmi_table);
++#endif
++
++ /*
++ * If acpi_disabled, bail out
++ * One exception: acpi=ht continues far enough to enumerate LAPICs
++ */
++ if (acpi_disabled && !acpi_ht)
++ return 1;
++
++ /*
++ * Initialize the ACPI boot-time table parser.
++ */
++ error = acpi_table_init();
++ if (error) {
++ disable_acpi();
++ return error;
++ }
++
++ acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
++
++ /*
++ * blacklist may disable ACPI entirely
++ */
++ error = acpi_blacklisted();
++ if (error) {
++ if (acpi_force) {
++ printk(KERN_WARNING PREFIX "acpi=force override\n");
++ } else {
++ printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
++ disable_acpi();
++ return error;
++ }
++ }
++
++ return 0;
++}
++
++int __init acpi_boot_init(void)
++{
++ /*
++ * If acpi_disabled, bail out
++ * One exception: acpi=ht continues far enough to enumerate LAPICs
++ */
++ if (acpi_disabled && !acpi_ht)
++ return 1;
++
++ acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
++
++ /*
++ * set sci_int and PM timer address
++ */
++ acpi_table_parse(ACPI_FADT, acpi_parse_fadt);
++
++ /*
++ * Process the Multiple APIC Description Table (MADT), if present
++ */
++ acpi_process_madt();
++
++ acpi_table_parse(ACPI_HPET, acpi_parse_hpet);
++
++ return 0;
++}
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/acpi/sleep-xen.c linux-2.6.18-xen-3.2.0/arch/i386/kernel/acpi/sleep-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/acpi/sleep-xen.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/acpi/sleep-xen.c 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,134 @@
++/*
++ * sleep.c - x86-specific ACPI sleep support.
++ *
++ * Copyright (C) 2001-2003 Patrick Mochel
++ * Copyright (C) 2001-2003 Pavel Machek <pavel@suse.cz>
++ */
++
++#include <linux/acpi.h>
++#include <linux/bootmem.h>
++#include <linux/dmi.h>
++#include <linux/cpumask.h>
++
++#include <asm/smp.h>
++
++#ifndef CONFIG_ACPI_PV_SLEEP
++/* address in low memory of the wakeup routine. */
++unsigned long acpi_wakeup_address = 0;
++unsigned long acpi_video_flags;
++extern char wakeup_start, wakeup_end;
++
++extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long));
++#endif
++
++/**
++ * acpi_save_state_mem - save kernel state
++ *
++ * Create an identity mapped page table and copy the wakeup routine to
++ * low memory.
++ */
++int acpi_save_state_mem(void)
++{
++#ifndef CONFIG_ACPI_PV_SLEEP
++ if (!acpi_wakeup_address)
++ return 1;
++ memcpy((void *)acpi_wakeup_address, &wakeup_start,
++ &wakeup_end - &wakeup_start);
++ acpi_copy_wakeup_routine(acpi_wakeup_address);
++#endif
++ return 0;
++}
++
++/*
++ * acpi_restore_state - undo effects of acpi_save_state_mem
++ */
++void acpi_restore_state_mem(void)
++{
++}
++
++/**
++ * acpi_reserve_bootmem - do _very_ early ACPI initialisation
++ *
++ * We allocate a page from the first 1MB of memory for the wakeup
++ * routine for when we come back from a sleep state. The
++ * runtime allocator allows specification of <16MB pages, but not
++ * <1MB pages.
++ */
++void __init acpi_reserve_bootmem(void)
++{
++#ifndef CONFIG_ACPI_PV_SLEEP
++ if ((&wakeup_end - &wakeup_start) > PAGE_SIZE) {
++ printk(KERN_ERR
++ "ACPI: Wakeup code way too big, S3 disabled.\n");
++ return;
++ }
++
++ acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE);
++ if (!acpi_wakeup_address)
++ printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
++#endif
++}
++
++#ifndef CONFIG_ACPI_PV_SLEEP
++static int __init acpi_sleep_setup(char *str)
++{
++ while ((str != NULL) && (*str != '\0')) {
++ if (strncmp(str, "s3_bios", 7) == 0)
++ acpi_video_flags = 1;
++ if (strncmp(str, "s3_mode", 7) == 0)
++ acpi_video_flags |= 2;
++ str = strchr(str, ',');
++ if (str != NULL)
++ str += strspn(str, ", \t");
++ }
++ return 1;
++}
++
++__setup("acpi_sleep=", acpi_sleep_setup);
++
++static __init int reset_videomode_after_s3(struct dmi_system_id *d)
++{
++ acpi_video_flags |= 2;
++ return 0;
++}
++
++static __initdata struct dmi_system_id acpisleep_dmi_table[] = {
++ { /* Reset video mode after returning from ACPI S3 sleep */
++ .callback = reset_videomode_after_s3,
++ .ident = "Toshiba Satellite 4030cdt",
++ .matches = {
++ DMI_MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"),
++ },
++ },
++ {}
++};
++
++static int __init acpisleep_dmi_init(void)
++{
++ dmi_check_system(acpisleep_dmi_table);
++ return 0;
++}
++
++core_initcall(acpisleep_dmi_init);
++
++#else /* CONFIG_ACPI_PV_SLEEP */
++#include <asm/hypervisor.h>
++#include <xen/interface/platform.h>
++int acpi_notify_hypervisor_state(u8 sleep_state,
++ u32 pm1a_cnt, u32 pm1b_cnt)
++{
++ struct xen_platform_op op = {
++ .cmd = XENPF_enter_acpi_sleep,
++ .interface_version = XENPF_INTERFACE_VERSION,
++ .u = {
++ .enter_acpi_sleep = {
++ .pm1a_cnt_val = (u16)pm1a_cnt,
++ .pm1b_cnt_val = (u16)pm1b_cnt,
++ .sleep_state = sleep_state,
++ },
++ },
++ };
++
++ return HYPERVISOR_platform_op(&op);
++}
++#endif /* CONFIG_ACPI_PV_SLEEP */
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/apic-xen.c linux-2.6.18-xen-3.2.0/arch/i386/kernel/apic-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/apic-xen.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/apic-xen.c 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,155 @@
++/*
++ * Local APIC handling, local APIC timers
++ *
++ * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
++ *
++ * Fixes
++ * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
++ * thanks to Eric Gilmore
++ * and Rolf G. Tews
++ * for testing these extensively.
++ * Maciej W. Rozycki : Various updates and fixes.
++ * Mikael Pettersson : Power Management for UP-APIC.
++ * Pavel Machek and
++ * Mikael Pettersson : PM converted to driver model.
++ */
++
++#include <linux/init.h>
++
++#include <linux/mm.h>
++#include <linux/delay.h>
++#include <linux/bootmem.h>
++#include <linux/smp_lock.h>
++#include <linux/interrupt.h>
++#include <linux/mc146818rtc.h>
++#include <linux/kernel_stat.h>
++#include <linux/sysdev.h>
++#include <linux/cpu.h>
++#include <linux/module.h>
++
++#include <asm/atomic.h>
++#include <asm/smp.h>
++#include <asm/mtrr.h>
++#include <asm/mpspec.h>
++#include <asm/desc.h>
++#include <asm/arch_hooks.h>
++#include <asm/hpet.h>
++#include <asm/i8253.h>
++#include <asm/nmi.h>
++
++#include <mach_apic.h>
++#include <mach_apicdef.h>
++#include <mach_ipi.h>
++
++#include "io_ports.h"
++
++#ifndef CONFIG_XEN
++/*
++ * cpu_mask that denotes the CPUs that needs timer interrupt coming in as
++ * IPIs in place of local APIC timers
++ */
++static cpumask_t timer_bcast_ipi;
++#endif
++
++/*
++ * Knob to control our willingness to enable the local APIC.
++ */
++int enable_local_apic __initdata = 0; /* -1=force-disable, +1=force-enable */
++
++/*
++ * Debug level
++ */
++int apic_verbosity;
++
++#ifndef CONFIG_XEN
++static int modern_apic(void)
++{
++ unsigned int lvr, version;
++ /* AMD systems use old APIC versions, so check the CPU */
++ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
++ boot_cpu_data.x86 >= 0xf)
++ return 1;
++ lvr = apic_read(APIC_LVR);
++ version = GET_APIC_VERSION(lvr);
++ return version >= 0x14;
++}
++#endif /* !CONFIG_XEN */
++
++/*
++ * 'what should we do if we get a hw irq event on an illegal vector'.
++ * each architecture has to answer this themselves.
++ */
++void ack_bad_irq(unsigned int irq)
++{
++ printk("unexpected IRQ trap at vector %02x\n", irq);
++ /*
++ * Currently unexpected vectors happen only on SMP and APIC.
++ * We _must_ ack these because every local APIC has only N
++ * irq slots per priority level, and a 'hanging, unacked' IRQ
++ * holds up an irq slot - in excessive cases (when multiple
++ * unexpected vectors occur) that might lock up the APIC
++ * completely.
++ * But only ack when the APIC is enabled -AK
++ */
++ if (cpu_has_apic)
++ ack_APIC_irq();
++}
++
++int get_physical_broadcast(void)
++{
++ return 0xff;
++}
++
++#ifndef CONFIG_XEN
++#ifndef CONFIG_SMP
++static void up_apic_timer_interrupt_call(struct pt_regs *regs)
++{
++ int cpu = smp_processor_id();
++
++ /*
++ * the NMI deadlock-detector uses this.
++ */
++ per_cpu(irq_stat, cpu).apic_timer_irqs++;
++
++ smp_local_timer_interrupt(regs);
++}
++#endif
++
++void smp_send_timer_broadcast_ipi(struct pt_regs *regs)
++{
++ cpumask_t mask;
++
++ cpus_and(mask, cpu_online_map, timer_bcast_ipi);
++ if (!cpus_empty(mask)) {
++#ifdef CONFIG_SMP
++ send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
++#else
++ /*
++ * We can directly call the apic timer interrupt handler
++ * in UP case. Minus all irq related functions
++ */
++ up_apic_timer_interrupt_call(regs);
++#endif
++ }
++}
++#endif
++
++int setup_profiling_timer(unsigned int multiplier)
++{
++ return -EINVAL;
++}
++
++/*
++ * This initializes the IO-APIC and APIC hardware if this is
++ * a UP kernel.
++ */
++int __init APIC_init_uniprocessor (void)
++{
++#ifdef CONFIG_X86_IO_APIC
++ if (smp_found_config)
++ if (!skip_ioapic_setup && nr_ioapics)
++ setup_IO_APIC();
++#endif
++
++ return 0;
++}
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/asm-offsets.c linux-2.6.18-xen-3.2.0/arch/i386/kernel/asm-offsets.c
+--- linux-2.6.18.8/arch/i386/kernel/asm-offsets.c 2007-02-23 15:52:30.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/asm-offsets.c 2008-02-15 16:21:49.000000000 -0800
+@@ -66,9 +66,14 @@ void foo(void)
+ OFFSET(pbe_orig_address, pbe, orig_address);
+ OFFSET(pbe_next, pbe, next);
+
++#ifndef CONFIG_X86_NO_TSS
+ /* Offset from the sysenter stack to tss.esp0 */
+- DEFINE(TSS_sysenter_esp0, offsetof(struct tss_struct, esp0) -
++ DEFINE(SYSENTER_stack_esp0, offsetof(struct tss_struct, esp0) -
+ sizeof(struct tss_struct));
++#else
++ /* sysenter stack points directly to esp0 */
++ DEFINE(SYSENTER_stack_esp0, 0);
++#endif
+
+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
+ DEFINE(VDSO_PRELINK, VDSO_PRELINK);
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/cpu/Makefile linux-2.6.18-xen-3.2.0/arch/i386/kernel/cpu/Makefile
+--- linux-2.6.18.8/arch/i386/kernel/cpu/Makefile 2007-02-23 15:52:30.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/cpu/Makefile 2008-02-15 16:21:49.000000000 -0800
+@@ -17,3 +17,4 @@ obj-$(CONFIG_X86_MCE) += mcheck/
+
+ obj-$(CONFIG_MTRR) += mtrr/
+ obj-$(CONFIG_CPU_FREQ) += cpufreq/
++
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/cpu/common-xen.c linux-2.6.18-xen-3.2.0/arch/i386/kernel/cpu/common-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/cpu/common-xen.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/cpu/common-xen.c 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,743 @@
++#include <linux/init.h>
++#include <linux/string.h>
++#include <linux/delay.h>
++#include <linux/smp.h>
++#include <linux/module.h>
++#include <linux/percpu.h>
++#include <linux/bootmem.h>
++#include <asm/semaphore.h>
++#include <asm/processor.h>
++#include <asm/i387.h>
++#include <asm/msr.h>
++#include <asm/io.h>
++#include <asm/mmu_context.h>
++#include <asm/mtrr.h>
++#include <asm/mce.h>
++#ifdef CONFIG_X86_LOCAL_APIC
++#include <asm/mpspec.h>
++#include <asm/apic.h>
++#include <mach_apic.h>
++#else
++#ifdef CONFIG_XEN
++#define phys_pkg_id(a,b) a
++#endif
++#endif
++#include <asm/hypervisor.h>
++
++#include "cpu.h"
++
++DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
++EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);
++
++#ifndef CONFIG_XEN
++DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
++EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
++#endif
++
++static int cachesize_override __cpuinitdata = -1;
++static int disable_x86_fxsr __cpuinitdata;
++static int disable_x86_serial_nr __cpuinitdata = 1;
++static int disable_x86_sep __cpuinitdata;
++
++struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
++
++extern int disable_pse;
++
++static void default_init(struct cpuinfo_x86 * c)
++{
++ /* Not much we can do here... */
++ /* Check if at least it has cpuid */
++ if (c->cpuid_level == -1) {
++ /* No cpuid. It must be an ancient CPU */
++ if (c->x86 == 4)
++ strcpy(c->x86_model_id, "486");
++ else if (c->x86 == 3)
++ strcpy(c->x86_model_id, "386");
++ }
++}
++
++static struct cpu_dev default_cpu = {
++ .c_init = default_init,
++ .c_vendor = "Unknown",
++};
++static struct cpu_dev * this_cpu = &default_cpu;
++
++static int __init cachesize_setup(char *str)
++{
++ get_option (&str, &cachesize_override);
++ return 1;
++}
++__setup("cachesize=", cachesize_setup);
++
++int __cpuinit get_model_name(struct cpuinfo_x86 *c)
++{
++ unsigned int *v;
++ char *p, *q;
++
++ if (cpuid_eax(0x80000000) < 0x80000004)
++ return 0;
++
++ v = (unsigned int *) c->x86_model_id;
++ cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
++ cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
++ cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
++ c->x86_model_id[48] = 0;
++
++ /* Intel chips right-justify this string for some dumb reason;
++ undo that brain damage */
++ p = q = &c->x86_model_id[0];
++ while ( *p == ' ' )
++ p++;
++ if ( p != q ) {
++ while ( *p )
++ *q++ = *p++;
++ while ( q <= &c->x86_model_id[48] )
++ *q++ = '\0'; /* Zero-pad the rest */
++ }
++
++ return 1;
++}
++
++
++void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
++{
++ unsigned int n, dummy, ecx, edx, l2size;
++
++ n = cpuid_eax(0x80000000);
++
++ if (n >= 0x80000005) {
++ cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
++ printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
++ edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
++ c->x86_cache_size=(ecx>>24)+(edx>>24);
++ }
++
++ if (n < 0x80000006) /* Some chips just has a large L1. */
++ return;
++
++ ecx = cpuid_ecx(0x80000006);
++ l2size = ecx >> 16;
++
++ /* do processor-specific cache resizing */
++ if (this_cpu->c_size_cache)
++ l2size = this_cpu->c_size_cache(c,l2size);
++
++ /* Allow user to override all this if necessary. */
++ if (cachesize_override != -1)
++ l2size = cachesize_override;
++
++ if ( l2size == 0 )
++ return; /* Again, no L2 cache is possible */
++
++ c->x86_cache_size = l2size;
++
++ printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
++ l2size, ecx & 0xFF);
++}
++
++/* Naming convention should be: <Name> [(<Codename>)] */
++/* This table only is used unless init_<vendor>() below doesn't set it; */
++/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
++
++/* Look up CPU names by table lookup. */
++static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
++{
++ struct cpu_model_info *info;
++
++ if ( c->x86_model >= 16 )
++ return NULL; /* Range check */
++
++ if (!this_cpu)
++ return NULL;
++
++ info = this_cpu->c_models;
++
++ while (info && info->family) {
++ if (info->family == c->x86)
++ return info->model_names[c->x86_model];
++ info++;
++ }
++ return NULL; /* Not found */
++}
++
++
++static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
++{
++ char *v = c->x86_vendor_id;
++ int i;
++ static int printed;
++
++ for (i = 0; i < X86_VENDOR_NUM; i++) {
++ if (cpu_devs[i]) {
++ if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
++ (cpu_devs[i]->c_ident[1] &&
++ !strcmp(v,cpu_devs[i]->c_ident[1]))) {
++ c->x86_vendor = i;
++ if (!early)
++ this_cpu = cpu_devs[i];
++ return;
++ }
++ }
++ }
++ if (!printed) {
++ printed++;
++ printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
++ printk(KERN_ERR "CPU: Your system may be unstable.\n");
++ }
++ c->x86_vendor = X86_VENDOR_UNKNOWN;
++ this_cpu = &default_cpu;
++}
++
++
++static int __init x86_fxsr_setup(char * s)
++{
++ disable_x86_fxsr = 1;
++ return 1;
++}
++__setup("nofxsr", x86_fxsr_setup);
++
++
++static int __init x86_sep_setup(char * s)
++{
++ disable_x86_sep = 1;
++ return 1;
++}
++__setup("nosep", x86_sep_setup);
++
++
++/* Standard macro to see if a specific flag is changeable */
++static inline int flag_is_changeable_p(u32 flag)
++{
++ u32 f1, f2;
++
++ asm("pushfl\n\t"
++ "pushfl\n\t"
++ "popl %0\n\t"
++ "movl %0,%1\n\t"
++ "xorl %2,%0\n\t"
++ "pushl %0\n\t"
++ "popfl\n\t"
++ "pushfl\n\t"
++ "popl %0\n\t"
++ "popfl\n\t"
++ : "=&r" (f1), "=&r" (f2)
++ : "ir" (flag));
++
++ return ((f1^f2) & flag) != 0;
++}
++
++
++/* Probe for the CPUID instruction */
++static int __cpuinit have_cpuid_p(void)
++{
++ return flag_is_changeable_p(X86_EFLAGS_ID);
++}
++
++/* Do minimum CPU detection early.
++ Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
++ The others are not touched to avoid unwanted side effects.
++
++ WARNING: this function is only called on the BP. Don't add code here
++ that is supposed to run on all CPUs. */
++static void __init early_cpu_detect(void)
++{
++ struct cpuinfo_x86 *c = &boot_cpu_data;
++
++ c->x86_cache_alignment = 32;
++
++ if (!have_cpuid_p())
++ return;
++
++ /* Get vendor name */
++ cpuid(0x00000000, &c->cpuid_level,
++ (int *)&c->x86_vendor_id[0],
++ (int *)&c->x86_vendor_id[8],
++ (int *)&c->x86_vendor_id[4]);
++
++ get_cpu_vendor(c, 1);
++
++ c->x86 = 4;
++ if (c->cpuid_level >= 0x00000001) {
++ u32 junk, tfms, cap0, misc;
++ cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
++ c->x86 = (tfms >> 8) & 15;
++ c->x86_model = (tfms >> 4) & 15;
++ if (c->x86 == 0xf)
++ c->x86 += (tfms >> 20) & 0xff;
++ if (c->x86 >= 0x6)
++ c->x86_model += ((tfms >> 16) & 0xF) << 4;
++ c->x86_mask = tfms & 15;
++ if (cap0 & (1<<19))
++ c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
++ }
++}
++
++void __cpuinit generic_identify(struct cpuinfo_x86 * c)
++{
++ u32 tfms, xlvl;
++ int ebx;
++
++ if (have_cpuid_p()) {
++ /* Get vendor name */
++ cpuid(0x00000000, &c->cpuid_level,
++ (int *)&c->x86_vendor_id[0],
++ (int *)&c->x86_vendor_id[8],
++ (int *)&c->x86_vendor_id[4]);
++
++ get_cpu_vendor(c, 0);
++ /* Initialize the standard set of capabilities */
++ /* Note that the vendor-specific code below might override */
++
++ /* Intel-defined flags: level 0x00000001 */
++ if ( c->cpuid_level >= 0x00000001 ) {
++ u32 capability, excap;
++ cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
++ c->x86_capability[0] = capability;
++ c->x86_capability[4] = excap;
++ c->x86 = (tfms >> 8) & 15;
++ c->x86_model = (tfms >> 4) & 15;
++ if (c->x86 == 0xf)
++ c->x86 += (tfms >> 20) & 0xff;
++ if (c->x86 >= 0x6)
++ c->x86_model += ((tfms >> 16) & 0xF) << 4;
++ c->x86_mask = tfms & 15;
++#ifdef CONFIG_X86_HT
++ c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
++#else
++ c->apicid = (ebx >> 24) & 0xFF;
++#endif
++ } else {
++ /* Have CPUID level 0 only - unheard of */
++ c->x86 = 4;
++ }
++
++ /* AMD-defined flags: level 0x80000001 */
++ xlvl = cpuid_eax(0x80000000);
++ if ( (xlvl & 0xffff0000) == 0x80000000 ) {
++ if ( xlvl >= 0x80000001 ) {
++ c->x86_capability[1] = cpuid_edx(0x80000001);
++ c->x86_capability[6] = cpuid_ecx(0x80000001);
++ }
++ if ( xlvl >= 0x80000004 )
++ get_model_name(c); /* Default name */
++ }
++ }
++
++ early_intel_workaround(c);
++
++#ifdef CONFIG_X86_HT
++ c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
++#endif
++}
++
++static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
++{
++ if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
++ /* Disable processor serial number */
++ unsigned long lo,hi;
++ rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
++ lo |= 0x200000;
++ wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
++ printk(KERN_NOTICE "CPU serial number disabled.\n");
++ clear_bit(X86_FEATURE_PN, c->x86_capability);
++
++ /* Disabling the serial number may affect the cpuid level */
++ c->cpuid_level = cpuid_eax(0);
++ }
++}
++
++static int __init x86_serial_nr_setup(char *s)
++{
++ disable_x86_serial_nr = 0;
++ return 1;
++}
++__setup("serialnumber", x86_serial_nr_setup);
++
++
++
++/*
++ * This does the hard work of actually picking apart the CPU stuff...
++ */
++void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
++{
++ int i;
++
++ c->loops_per_jiffy = loops_per_jiffy;
++ c->x86_cache_size = -1;
++ c->x86_vendor = X86_VENDOR_UNKNOWN;
++ c->cpuid_level = -1; /* CPUID not detected */
++ c->x86_model = c->x86_mask = 0; /* So far unknown... */
++ c->x86_vendor_id[0] = '\0'; /* Unset */
++ c->x86_model_id[0] = '\0'; /* Unset */
++ c->x86_max_cores = 1;
++ memset(&c->x86_capability, 0, sizeof c->x86_capability);
++
++ if (!have_cpuid_p()) {
++ /* First of all, decide if this is a 486 or higher */
++ /* It's a 486 if we can modify the AC flag */
++ if ( flag_is_changeable_p(X86_EFLAGS_AC) )
++ c->x86 = 4;
++ else
++ c->x86 = 3;
++ }
++
++ generic_identify(c);
++
++ printk(KERN_DEBUG "CPU: After generic identify, caps:");
++ for (i = 0; i < NCAPINTS; i++)
++ printk(" %08lx", c->x86_capability[i]);
++ printk("\n");
++
++ if (this_cpu->c_identify) {
++ this_cpu->c_identify(c);
++
++ printk(KERN_DEBUG "CPU: After vendor identify, caps:");
++ for (i = 0; i < NCAPINTS; i++)
++ printk(" %08lx", c->x86_capability[i]);
++ printk("\n");
++ }
++
++ /*
++ * Vendor-specific initialization. In this section we
++ * canonicalize the feature flags, meaning if there are
++ * features a certain CPU supports which CPUID doesn't
++ * tell us, CPUID claiming incorrect flags, or other bugs,
++ * we handle them here.
++ *
++ * At the end of this section, c->x86_capability better
++ * indicate the features this CPU genuinely supports!
++ */
++ if (this_cpu->c_init)
++ this_cpu->c_init(c);
++
++ /* Disable the PN if appropriate */
++ squash_the_stupid_serial_number(c);
++
++ /*
++ * The vendor-specific functions might have changed features. Now
++ * we do "generic changes."
++ */
++
++ /* TSC disabled? */
++ if ( tsc_disable )
++ clear_bit(X86_FEATURE_TSC, c->x86_capability);
++
++ /* FXSR disabled? */
++ if (disable_x86_fxsr) {
++ clear_bit(X86_FEATURE_FXSR, c->x86_capability);
++ clear_bit(X86_FEATURE_XMM, c->x86_capability);
++ }
++
++ /* SEP disabled? */
++ if (disable_x86_sep)
++ clear_bit(X86_FEATURE_SEP, c->x86_capability);
++
++ if (disable_pse)
++ clear_bit(X86_FEATURE_PSE, c->x86_capability);
++
++ /* If the model name is still unset, do table lookup. */
++ if ( !c->x86_model_id[0] ) {
++ char *p;
++ p = table_lookup_model(c);
++ if ( p )
++ strcpy(c->x86_model_id, p);
++ else
++ /* Last resort... */
++ sprintf(c->x86_model_id, "%02x/%02x",
++ c->x86, c->x86_model);
++ }
++
++ /* Now the feature flags better reflect actual CPU features! */
++
++ printk(KERN_DEBUG "CPU: After all inits, caps:");
++ for (i = 0; i < NCAPINTS; i++)
++ printk(" %08lx", c->x86_capability[i]);
++ printk("\n");
++
++ /*
++ * On SMP, boot_cpu_data holds the common feature set between
++ * all CPUs; so make sure that we indicate which features are
++ * common between the CPUs. The first time this routine gets
++ * executed, c == &boot_cpu_data.
++ */
++ if ( c != &boot_cpu_data ) {
++ /* AND the already accumulated flags with these */
++ for ( i = 0 ; i < NCAPINTS ; i++ )
++ boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
++ }
++
++ /* Init Machine Check Exception if available. */
++ mcheck_init(c);
++
++ if (c == &boot_cpu_data)
++ sysenter_setup();
++ enable_sep_cpu();
++
++ if (c == &boot_cpu_data)
++ mtrr_bp_init();
++ else
++ mtrr_ap_init();
++}
++
++#ifdef CONFIG_X86_HT
++void __cpuinit detect_ht(struct cpuinfo_x86 *c)
++{
++ u32 eax, ebx, ecx, edx;
++ int index_msb, core_bits;
++
++ cpuid(1, &eax, &ebx, &ecx, &edx);
++
++ if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
++ return;
++
++ smp_num_siblings = (ebx & 0xff0000) >> 16;
++
++ if (smp_num_siblings == 1) {
++ printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
++ } else if (smp_num_siblings > 1 ) {
++
++ if (smp_num_siblings > NR_CPUS) {
++ printk(KERN_WARNING "CPU: Unsupported number of the "
++ "siblings %d", smp_num_siblings);
++ smp_num_siblings = 1;
++ return;
++ }
++
++ index_msb = get_count_order(smp_num_siblings);
++ c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
++
++ printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
++ c->phys_proc_id);
++
++ smp_num_siblings = smp_num_siblings / c->x86_max_cores;
++
++ index_msb = get_count_order(smp_num_siblings) ;
++
++ core_bits = get_count_order(c->x86_max_cores);
++
++ c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
++ ((1 << core_bits) - 1);
++
++ if (c->x86_max_cores > 1)
++ printk(KERN_INFO "CPU: Processor Core ID: %d\n",
++ c->cpu_core_id);
++ }
++}
++#endif
++
++void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
++{
++ char *vendor = NULL;
++
++ if (c->x86_vendor < X86_VENDOR_NUM)
++ vendor = this_cpu->c_vendor;
++ else if (c->cpuid_level >= 0)
++ vendor = c->x86_vendor_id;
++
++ if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
++ printk("%s ", vendor);
++
++ if (!c->x86_model_id[0])
++ printk("%d86", c->x86);
++ else
++ printk("%s", c->x86_model_id);
++
++ if (c->x86_mask || c->cpuid_level >= 0)
++ printk(" stepping %02x\n", c->x86_mask);
++ else
++ printk("\n");
++}
++
++cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
++
++/* This is hacky. :)
++ * We're emulating future behavior.
++ * In the future, the cpu-specific init functions will be called implicitly
++ * via the magic of initcalls.
++ * They will insert themselves into the cpu_devs structure.
++ * Then, when cpu_init() is called, we can just iterate over that array.
++ */
++
++extern int intel_cpu_init(void);
++extern int cyrix_init_cpu(void);
++extern int nsc_init_cpu(void);
++extern int amd_init_cpu(void);
++extern int centaur_init_cpu(void);
++extern int transmeta_init_cpu(void);
++extern int rise_init_cpu(void);
++extern int nexgen_init_cpu(void);
++extern int umc_init_cpu(void);
++
++void __init early_cpu_init(void)
++{
++ intel_cpu_init();
++ cyrix_init_cpu();
++ nsc_init_cpu();
++ amd_init_cpu();
++ centaur_init_cpu();
++ transmeta_init_cpu();
++ rise_init_cpu();
++ nexgen_init_cpu();
++ umc_init_cpu();
++ early_cpu_detect();
++
++#ifdef CONFIG_DEBUG_PAGEALLOC
++ /* pse is not compatible with on-the-fly unmapping,
++ * disable it even if the cpus claim to support it.
++ */
++ clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
++ disable_pse = 1;
++#endif
++}
++
++static void __cpuinit cpu_gdt_init(const struct Xgt_desc_struct *gdt_descr)
++{
++ unsigned long frames[16];
++ unsigned long va;
++ int f;
++
++ for (va = gdt_descr->address, f = 0;
++ va < gdt_descr->address + gdt_descr->size;
++ va += PAGE_SIZE, f++) {
++ frames[f] = virt_to_mfn(va);
++ make_lowmem_page_readonly(
++ (void *)va, XENFEAT_writable_descriptor_tables);
++ }
++ if (HYPERVISOR_set_gdt(frames, (gdt_descr->size + 1) / 8))
++ BUG();
++}
++
++/*
++ * cpu_init() initializes state that is per-CPU. Some data is already
++ * initialized (naturally) in the bootstrap process, such as the GDT
++ * and IDT. We reload them nevertheless, this function acts as a
++ * 'CPU state barrier', nothing should get across.
++ */
++void __cpuinit cpu_init(void)
++{
++ int cpu = smp_processor_id();
++#ifndef CONFIG_X86_NO_TSS
++ struct tss_struct * t = &per_cpu(init_tss, cpu);
++#endif
++ struct thread_struct *thread = &current->thread;
++ struct desc_struct *gdt;
++ struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
++
++ if (cpu_test_and_set(cpu, cpu_initialized)) {
++ printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
++ for (;;) local_irq_enable();
++ }
++ printk(KERN_INFO "Initializing CPU#%d\n", cpu);
++
++ if (cpu_has_vme || cpu_has_de)
++ clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
++ if (tsc_disable && cpu_has_tsc) {
++ printk(KERN_NOTICE "Disabling TSC...\n");
++ /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
++ clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
++ set_in_cr4(X86_CR4_TSD);
++ }
++
++#ifndef CONFIG_XEN
++ /* The CPU hotplug case */
++ if (cpu_gdt_descr->address) {
++ gdt = (struct desc_struct *)cpu_gdt_descr->address;
++ memset(gdt, 0, PAGE_SIZE);
++ goto old_gdt;
++ }
++ /*
++ * This is a horrible hack to allocate the GDT. The problem
++ * is that cpu_init() is called really early for the boot CPU
++ * (and hence needs bootmem) but much later for the secondary
++ * CPUs, when bootmem will have gone away
++ */
++ if (NODE_DATA(0)->bdata->node_bootmem_map) {
++ gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
++ /* alloc_bootmem_pages panics on failure, so no check */
++ memset(gdt, 0, PAGE_SIZE);
++ } else {
++ gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
++ if (unlikely(!gdt)) {
++ printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
++ for (;;)
++ local_irq_enable();
++ }
++ }
++old_gdt:
++ /*
++ * Initialize the per-CPU GDT with the boot GDT,
++ * and set up the GDT descriptor:
++ */
++ memcpy(gdt, cpu_gdt_table, GDT_SIZE);
++
++ /* Set up GDT entry for 16bit stack */
++ *(__u64 *)(&gdt[GDT_ENTRY_ESPFIX_SS]) |=
++ ((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) |
++ ((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
++ (CPU_16BIT_STACK_SIZE - 1);
++
++ cpu_gdt_descr->size = GDT_SIZE - 1;
++ cpu_gdt_descr->address = (unsigned long)gdt;
++#else
++ if (cpu == 0 && cpu_gdt_descr->address == 0) {
++ gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
++ /* alloc_bootmem_pages panics on failure, so no check */
++ memset(gdt, 0, PAGE_SIZE);
++
++ memcpy(gdt, cpu_gdt_table, GDT_SIZE);
++
++ cpu_gdt_descr->size = GDT_SIZE;
++ cpu_gdt_descr->address = (unsigned long)gdt;
++ }
++#endif
++
++ cpu_gdt_init(cpu_gdt_descr);
++
++ /*
++ * Set up and load the per-CPU TSS and LDT
++ */
++ atomic_inc(&init_mm.mm_count);
++ current->active_mm = &init_mm;
++ if (current->mm)
++ BUG();
++ enter_lazy_tlb(&init_mm, current);
++
++ load_esp0(t, thread);
++
++ load_LDT(&init_mm.context);
++
++#ifdef CONFIG_DOUBLEFAULT
++ /* Set up doublefault TSS pointer in the GDT */
++ __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
++#endif
++
++ /* Clear %fs and %gs. */
++ asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");
++
++ /* Clear all 6 debug registers: */
++ set_debugreg(0, 0);
++ set_debugreg(0, 1);
++ set_debugreg(0, 2);
++ set_debugreg(0, 3);
++ set_debugreg(0, 6);
++ set_debugreg(0, 7);
++
++ /*
++ * Force FPU initialization:
++ */
++ current_thread_info()->status = 0;
++ clear_used_math();
++ mxcsr_feature_mask_init();
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++void __cpuinit cpu_uninit(void)
++{
++ int cpu = raw_smp_processor_id();
++ cpu_clear(cpu, cpu_initialized);
++
++ /* lazy TLB state */
++ per_cpu(cpu_tlbstate, cpu).state = 0;
++ per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
++}
++#endif
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/cpu/cpufreq/powernow-k8.c linux-2.6.18-xen-3.2.0/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+--- linux-2.6.18.8/arch/i386/kernel/cpu/cpufreq/powernow-k8.c 2007-02-23 15:52:30.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/cpu/cpufreq/powernow-k8.c 2008-02-15 16:21:49.000000000 -0800
+@@ -46,7 +46,7 @@
+
+ #define PFX "powernow-k8: "
+ #define BFX PFX "BIOS error: "
+-#define VERSION "version 2.00.00"
++#define VERSION "version 2.20.00"
+ #include "powernow-k8.h"
+
+ /* serialize freq changes */
+@@ -66,36 +66,15 @@ static u32 find_freq_from_fid(u32 fid)
+ return 800 + (fid * 100);
+ }
+
+-
+ /* Return a frequency in KHz, given an input fid */
+ static u32 find_khz_freq_from_fid(u32 fid)
+ {
+ return 1000 * find_freq_from_fid(fid);
+ }
+
+-/* Return a frequency in MHz, given an input fid and did */
+-static u32 find_freq_from_fiddid(u32 fid, u32 did)
+-{
+- return 100 * (fid + 0x10) >> did;
+-}
+-
+-static u32 find_khz_freq_from_fiddid(u32 fid, u32 did)
+-{
+- return 1000 * find_freq_from_fiddid(fid, did);
+-}
+-
+-static u32 find_fid_from_pstate(u32 pstate)
+-{
+- u32 hi, lo;
+- rdmsr(MSR_PSTATE_DEF_BASE + pstate, lo, hi);
+- return lo & HW_PSTATE_FID_MASK;
+-}
+-
+-static u32 find_did_from_pstate(u32 pstate)
++static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data, u32 pstate)
+ {
+- u32 hi, lo;
+- rdmsr(MSR_PSTATE_DEF_BASE + pstate, lo, hi);
+- return (lo & HW_PSTATE_DID_MASK) >> HW_PSTATE_DID_SHIFT;
++ return data[pstate].frequency;
+ }
+
+ /* Return the vco fid for an input fid
+@@ -139,9 +118,7 @@ static int query_current_values_with_pen
+ if (cpu_family == CPU_HW_PSTATE) {
+ rdmsr(MSR_PSTATE_STATUS, lo, hi);
+ i = lo & HW_PSTATE_MASK;
+- rdmsr(MSR_PSTATE_DEF_BASE + i, lo, hi);
+- data->currfid = lo & HW_PSTATE_FID_MASK;
+- data->currdid = (lo & HW_PSTATE_DID_MASK) >> HW_PSTATE_DID_SHIFT;
++ data->currpstate = i;
+ return 0;
+ }
+ do {
+@@ -292,7 +269,7 @@ static int decrease_vid_code_by_step(str
+ static int transition_pstate(struct powernow_k8_data *data, u32 pstate)
+ {
+ wrmsr(MSR_PSTATE_CTRL, pstate, 0);
+- data->currfid = find_fid_from_pstate(pstate);
++ data->currpstate = pstate;
+ return 0;
+ }
+
+@@ -738,6 +715,7 @@ static int find_psb_table(struct powerno
+
+ data->numps = psb->numps;
+ dprintk("numpstates: 0x%x\n", data->numps);
++ data->starting_core_affinity = cpumask_of_cpu(0);
+ return fill_powernow_table(data, (struct pst_s *)(psb+1), maxvid);
+ }
+ /*
+@@ -758,15 +736,43 @@ static int find_psb_table(struct powerno
+ #ifdef CONFIG_X86_POWERNOW_K8_ACPI
+ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index)
+ {
+- if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE))
++ if (!data->acpi_data->state_count || (cpu_family == CPU_HW_PSTATE))
+ return;
+
+- data->irt = (data->acpi_data.states[index].control >> IRT_SHIFT) & IRT_MASK;
+- data->rvo = (data->acpi_data.states[index].control >> RVO_SHIFT) & RVO_MASK;
+- data->exttype = (data->acpi_data.states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
+- data->plllock = (data->acpi_data.states[index].control >> PLL_L_SHIFT) & PLL_L_MASK;
+- data->vidmvs = 1 << ((data->acpi_data.states[index].control >> MVS_SHIFT) & MVS_MASK);
+- data->vstable = (data->acpi_data.states[index].control >> VST_SHIFT) & VST_MASK;
++ data->irt = (data->acpi_data->states[index].control >> IRT_SHIFT) & IRT_MASK;
++ data->rvo = (data->acpi_data->states[index].control >> RVO_SHIFT) & RVO_MASK;
++ data->exttype = (data->acpi_data->states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
++ data->plllock = (data->acpi_data->states[index].control >> PLL_L_SHIFT) & PLL_L_MASK;
++ data->vidmvs = 1 << ((data->acpi_data->states[index].control >> MVS_SHIFT) & MVS_MASK);
++ data->vstable = (data->acpi_data->states[index].control >> VST_SHIFT) & VST_MASK;
++}
++
++static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];
++static int preregister_valid = 0;
++
++static int powernow_k8_cpu_preinit_acpi()
++{
++ int i;
++ struct acpi_processor_performance *data;
++ for_each_possible_cpu(i) {
++ data = kzalloc(sizeof(struct acpi_processor_performance),
++ GFP_KERNEL);
++ if (!data) {
++ int j;
++ for_each_possible_cpu(j) {
++ kfree(acpi_perf_data[j]);
++ acpi_perf_data[j] = NULL;
++ }
++ return -ENODEV;
++ }
++ acpi_perf_data[i] = data;
++ }
++
++ if (acpi_processor_preregister_performance(acpi_perf_data))
++ return -ENODEV;
++ else
++ preregister_valid = 1;
++ return 0;
+ }
+
+ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
+@@ -774,28 +780,29 @@ static int powernow_k8_cpu_init_acpi(str
+ struct cpufreq_frequency_table *powernow_table;
+ int ret_val;
+
+- if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
++ data->acpi_data = acpi_perf_data[data->cpu];
++ if (acpi_processor_register_performance(data->acpi_data, data->cpu)) {
+ dprintk("register performance failed: bad ACPI data\n");
+ return -EIO;
+ }
+
+ /* verify the data contained in the ACPI structures */
+- if (data->acpi_data.state_count <= 1) {
++ if (data->acpi_data->state_count <= 1) {
+ dprintk("No ACPI P-States\n");
+ goto err_out;
+ }
+
+- if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
+- (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
++ if ((data->acpi_data->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
++ (data->acpi_data->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
+ dprintk("Invalid control/status registers (%x - %x)\n",
+- data->acpi_data.control_register.space_id,
+- data->acpi_data.status_register.space_id);
++ data->acpi_data->control_register.space_id,
++ data->acpi_data->status_register.space_id);
+ goto err_out;
+ }
+
+ /* fill in data->powernow_table */
+ powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table)
+- * (data->acpi_data.state_count + 1)), GFP_KERNEL);
++ * (data->acpi_data->state_count + 1)), GFP_KERNEL);
+ if (!powernow_table) {
+ dprintk("powernow_table memory alloc failure\n");
+ goto err_out;
+@@ -808,28 +815,43 @@ static int powernow_k8_cpu_init_acpi(str
+ if (ret_val)
+ goto err_out_mem;
+
+- powernow_table[data->acpi_data.state_count].frequency = CPUFREQ_TABLE_END;
+- powernow_table[data->acpi_data.state_count].index = 0;
++ powernow_table[data->acpi_data->state_count].frequency = CPUFREQ_TABLE_END;
++ powernow_table[data->acpi_data->state_count].index = 0;
+ data->powernow_table = powernow_table;
+
+ /* fill in data */
+- data->numps = data->acpi_data.state_count;
++ data->numps = data->acpi_data->state_count;
+ print_basics(data);
+ powernow_k8_acpi_pst_values(data, 0);
+
+ /* notify BIOS that we exist */
+ acpi_processor_notify_smm(THIS_MODULE);
+
++ /* determine affinity, from ACPI if available */
++ if (preregister_valid) {
++ if ((data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ALL) ||
++ (data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ANY))
++ data->starting_core_affinity = data->acpi_data->shared_cpu_map;
++ else
++ data->starting_core_affinity = cpumask_of_cpu(data->cpu);
++ } else {
++ /* best guess from family if not */
++ if (cpu_family == CPU_HW_PSTATE)
++ data->starting_core_affinity = cpumask_of_cpu(data->cpu);
++ else
++ data->starting_core_affinity = cpu_core_map[data->cpu];
++ }
++
+ return 0;
+
+ err_out_mem:
+ kfree(powernow_table);
+
+ err_out:
+- acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
++ acpi_processor_unregister_performance(data->acpi_data, data->cpu);
+
+- /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */
+- data->acpi_data.state_count = 0;
++ /* data->acpi_data->state_count informs us at ->exit() whether ACPI was used */
++ data->acpi_data->state_count = 0;
+
+ return -ENODEV;
+ }
+@@ -837,41 +859,23 @@ err_out:
+ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table)
+ {
+ int i;
++ u32 hi = 0, lo = 0;
++ rdmsr(MSR_PSTATE_CUR_LIMIT, hi, lo);
++ data->max_hw_pstate = (hi & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;
+
+- for (i = 0; i < data->acpi_data.state_count; i++) {
++ for (i = 0; i < data->acpi_data->state_count; i++) {
+ u32 index;
+- u32 hi = 0, lo = 0;
+- u32 fid;
+- u32 did;
+
+- index = data->acpi_data.states[i].control & HW_PSTATE_MASK;
+- if (index > MAX_HW_PSTATE) {
++ index = data->acpi_data->states[i].control & HW_PSTATE_MASK;
++ if (index > data->max_hw_pstate) {
+ printk(KERN_ERR PFX "invalid pstate %d - bad value %d.\n", i, index);
+ printk(KERN_ERR PFX "Please report to BIOS manufacturer\n");
+- }
+- rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
+- if (!(hi & HW_PSTATE_VALID_MASK)) {
+- dprintk("invalid pstate %d, ignoring\n", index);
+- powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+ continue;
+ }
+
+- fid = lo & HW_PSTATE_FID_MASK;
+- did = (lo & HW_PSTATE_DID_MASK) >> HW_PSTATE_DID_SHIFT;
+-
+- dprintk(" %d : fid 0x%x, did 0x%x\n", index, fid, did);
++ powernow_table[i].index = index;
++ powernow_table[i].frequency = data->acpi_data->states[i].core_frequency * 1000;
+
+- powernow_table[i].index = index | (fid << HW_FID_INDEX_SHIFT) | (did << HW_DID_INDEX_SHIFT);
+-
+- powernow_table[i].frequency = find_khz_freq_from_fiddid(fid, did);
+-
+- if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) {
+- printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n",
+- powernow_table[i].frequency,
+- (unsigned int) (data->acpi_data.states[i].core_frequency * 1000));
+- powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+- continue;
+- }
+ }
+ return 0;
+ }
+@@ -880,16 +884,16 @@ static int fill_powernow_table_fidvid(st
+ {
+ int i;
+ int cntlofreq = 0;
+- for (i = 0; i < data->acpi_data.state_count; i++) {
++ for (i = 0; i < data->acpi_data->state_count; i++) {
+ u32 fid;
+ u32 vid;
+
+ if (data->exttype) {
+- fid = data->acpi_data.states[i].status & EXT_FID_MASK;
+- vid = (data->acpi_data.states[i].status >> VID_SHIFT) & EXT_VID_MASK;
++ fid = data->acpi_data->states[i].status & EXT_FID_MASK;
++ vid = (data->acpi_data->states[i].status >> VID_SHIFT) & EXT_VID_MASK;
+ } else {
+- fid = data->acpi_data.states[i].control & FID_MASK;
+- vid = (data->acpi_data.states[i].control >> VID_SHIFT) & VID_MASK;
++ fid = data->acpi_data->states[i].control & FID_MASK;
++ vid = (data->acpi_data->states[i].control >> VID_SHIFT) & VID_MASK;
+ }
+
+ dprintk(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid);
+@@ -930,10 +934,10 @@ static int fill_powernow_table_fidvid(st
+ cntlofreq = i;
+ }
+
+- if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) {
++ if (powernow_table[i].frequency != (data->acpi_data->states[i].core_frequency * 1000)) {
+ printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n",
+ powernow_table[i].frequency,
+- (unsigned int) (data->acpi_data.states[i].core_frequency * 1000));
++ (unsigned int) (data->acpi_data->states[i].core_frequency * 1000));
+ powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+ continue;
+ }
+@@ -943,14 +947,15 @@ static int fill_powernow_table_fidvid(st
+
+ static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
+ {
+- if (data->acpi_data.state_count)
+- acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
++ if (data->acpi_data->state_count)
++ acpi_processor_unregister_performance(data->acpi_data, data->cpu);
+ }
+
+ #else
+ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; }
+ static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; }
+ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; }
++static int powernow_k8_cpu_preinit_acpi() { return -ENODEV; }
+ #endif /* CONFIG_X86_POWERNOW_K8_ACPI */
+
+ /* Take a frequency, and issue the fid/vid transition command */
+@@ -1012,22 +1017,18 @@ static int transition_frequency_fidvid(s
+ /* Take a frequency, and issue the hardware pstate transition command */
+ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned int index)
+ {
+- u32 fid = 0;
+- u32 did = 0;
+ u32 pstate = 0;
+ int res, i;
+ struct cpufreq_freqs freqs;
+
+ dprintk("cpu %d transition to index %u\n", smp_processor_id(), index);
+
+- /* get fid did for hardware pstate transition */
++ /* get MSR index for hardware pstate transition */
+ pstate = index & HW_PSTATE_MASK;
+- if (pstate > MAX_HW_PSTATE)
++ if (pstate > data->max_hw_pstate)
+ return 0;
+- fid = (index & HW_FID_INDEX_MASK) >> HW_FID_INDEX_SHIFT;
+- did = (index & HW_DID_INDEX_MASK) >> HW_DID_INDEX_SHIFT;
+- freqs.old = find_khz_freq_from_fiddid(data->currfid, data->currdid);
+- freqs.new = find_khz_freq_from_fiddid(fid, did);
++ freqs.old = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
++ freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
+
+ for_each_cpu_mask(i, *(data->available_cores)) {
+ freqs.cpu = i;
+@@ -1035,9 +1036,7 @@ static int transition_frequency_pstate(s
+ }
+
+ res = transition_pstate(data, pstate);
+- data->currfid = find_fid_from_pstate(pstate);
+- data->currdid = find_did_from_pstate(pstate);
+- freqs.new = find_khz_freq_from_fiddid(data->currfid, data->currdid);
++ freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
+
+ for_each_cpu_mask(i, *(data->available_cores)) {
+ freqs.cpu = i;
+@@ -1082,10 +1081,7 @@ static int powernowk8_target(struct cpuf
+ if (query_current_values_with_pending_wait(data))
+ goto err_out;
+
+- if (cpu_family == CPU_HW_PSTATE)
+- dprintk("targ: curr fid 0x%x, did 0x%x\n",
+- data->currfid, data->currvid);
+- else {
++ if (cpu_family != CPU_HW_PSTATE) {
+ dprintk("targ: curr fid 0x%x, vid 0x%x\n",
+ data->currfid, data->currvid);
+
+@@ -1116,7 +1112,7 @@ static int powernowk8_target(struct cpuf
+ mutex_unlock(&fidvid_mutex);
+
+ if (cpu_family == CPU_HW_PSTATE)
+- pol->cur = find_khz_freq_from_fiddid(data->currfid, data->currdid);
++ pol->cur = find_khz_freq_from_pstate(data->powernow_table, newstate);
+ else
+ pol->cur = find_khz_freq_from_fid(data->currfid);
+ ret = 0;
+@@ -1164,7 +1160,7 @@ static int __cpuinit powernowk8_cpu_init
+ * an UP version, and is deprecated by AMD.
+ */
+ if (num_online_cpus() != 1) {
+- printk(KERN_ERR PFX "MP systems not supported by PSB BIOS structure\n");
++ printk(KERN_ERR PFX "Your BIOS does not provide _PSS objects. PowerNow! does not work on SMP systems without _PSS objects. Complain to your BIOS vendor.\n");
+ kfree(data);
+ return -ENODEV;
+ }
+@@ -1204,10 +1200,7 @@ static int __cpuinit powernowk8_cpu_init
+ set_cpus_allowed(current, oldmask);
+
+ pol->governor = CPUFREQ_DEFAULT_GOVERNOR;
+- if (cpu_family == CPU_HW_PSTATE)
+- pol->cpus = cpumask_of_cpu(pol->cpu);
+- else
+- pol->cpus = cpu_core_map[pol->cpu];
++ pol->cpus = data->starting_core_affinity;
+ data->available_cores = &(pol->cpus);
+
+ /* Take a crude guess here.
+@@ -1216,7 +1209,7 @@ static int __cpuinit powernowk8_cpu_init
+ + (3 * (1 << data->irt) * 10)) * 1000;
+
+ if (cpu_family == CPU_HW_PSTATE)
+- pol->cur = find_khz_freq_from_fiddid(data->currfid, data->currdid);
++ pol->cur = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
+ else
+ pol->cur = find_khz_freq_from_fid(data->currfid);
+ dprintk("policy current frequency %d kHz\n", pol->cur);
+@@ -1233,8 +1226,7 @@ static int __cpuinit powernowk8_cpu_init
+ cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
+
+ if (cpu_family == CPU_HW_PSTATE)
+- dprintk("cpu_init done, current fid 0x%x, did 0x%x\n",
+- data->currfid, data->currdid);
++ dprintk("cpu_init done, current pstate 0x%x\n", data->currpstate);
+ else
+ dprintk("cpu_init done, current fid 0x%x, vid 0x%x\n",
+ data->currfid, data->currvid);
+@@ -1289,7 +1281,10 @@ static unsigned int powernowk8_get (unsi
+ if (query_current_values_with_pending_wait(data))
+ goto out;
+
+- khz = find_khz_freq_from_fid(data->currfid);
++ if (cpu_family == CPU_HW_PSTATE)
++ khz = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
++ else
++ khz = find_khz_freq_from_fid(data->currfid);
+
+ out:
+ set_cpus_allowed(current, oldmask);
+@@ -1323,6 +1318,7 @@ static int __cpuinit powernowk8_init(voi
+ }
+
+ if (supported_cpus == num_online_cpus()) {
++ powernow_k8_cpu_preinit_acpi();
+ printk(KERN_INFO PFX "Found %d %s "
+ "processors (" VERSION ")\n", supported_cpus,
+ boot_cpu_data.x86_model_id);
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/cpu/cpufreq/powernow-k8.h linux-2.6.18-xen-3.2.0/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
+--- linux-2.6.18.8/arch/i386/kernel/cpu/cpufreq/powernow-k8.h 2007-02-23 15:52:30.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/cpu/cpufreq/powernow-k8.h 2008-02-15 16:21:49.000000000 -0800
+@@ -1,5 +1,5 @@
+ /*
+- * (c) 2003-2006 Advanced Micro Devices, Inc.
++#* (c) 2003-2006 Advanced Micro Devices, Inc.
+ * Your use of this code is subject to the terms and conditions of the
+ * GNU general public license version 2. See "COPYING" or
+ * http://www.gnu.org/licenses/gpl.html
+@@ -10,6 +10,7 @@ struct powernow_k8_data {
+
+ u32 numps; /* number of p-states */
+ u32 batps; /* number of p-states supported on battery */
++ u32 max_hw_pstate; /* maximum legal hardware pstate */
+
+ /* these values are constant when the PSB is used to determine
+ * vid/fid pairings, but are modified during the ->target() call
+@@ -21,8 +22,8 @@ struct powernow_k8_data {
+ u32 plllock; /* pll lock time, units 1 us */
+ u32 exttype; /* extended interface = 1 */
+
+- /* keep track of the current fid / vid or did */
+- u32 currvid, currfid, currdid;
++ /* keep track of the current fid / vid or pstate */
++ u32 currvid, currfid, currpstate;
+
+ /* the powernow_table includes all frequency and vid/fid pairings:
+ * fid are the lower 8 bits of the index, vid are the upper 8 bits.
+@@ -32,12 +33,13 @@ struct powernow_k8_data {
+ #ifdef CONFIG_X86_POWERNOW_K8_ACPI
+ /* the acpi table needs to be kept. it's only available if ACPI was
+ * used to determine valid frequency/vid/fid states */
+- struct acpi_processor_performance acpi_data;
++ struct acpi_processor_performance *acpi_data;
+ #endif
+ /* we need to keep track of associated cores, but let cpufreq
+ * handle hotplug events - so just point at cpufreq pol->cpus
+ * structure */
+ cpumask_t *available_cores;
++ cpumask_t starting_core_affinity;
+ };
+
+
+@@ -87,23 +89,14 @@ struct powernow_k8_data {
+
+ /* Hardware Pstate _PSS and MSR definitions */
+ #define USE_HW_PSTATE 0x00000080
+-#define HW_PSTATE_FID_MASK 0x0000003f
+-#define HW_PSTATE_DID_MASK 0x000001c0
+-#define HW_PSTATE_DID_SHIFT 6
+-#define HW_PSTATE_MASK 0x00000007
+-#define HW_PSTATE_VALID_MASK 0x80000000
+-#define HW_FID_INDEX_SHIFT 8
+-#define HW_FID_INDEX_MASK 0x0000ff00
+-#define HW_DID_INDEX_SHIFT 16
+-#define HW_DID_INDEX_MASK 0x00ff0000
+-#define HW_WATTS_MASK 0xff
+-#define HW_PWR_DVR_MASK 0x300
+-#define HW_PWR_DVR_SHIFT 8
+-#define HW_PWR_MAX_MULT 3
+-#define MAX_HW_PSTATE 8 /* hw pstate supports up to 8 */
++#define HW_PSTATE_MASK 0x00000007
++#define HW_PSTATE_VALID_MASK 0x80000000
++#define HW_PSTATE_MAX_MASK 0x000000f0
++#define HW_PSTATE_MAX_SHIFT 4
+ #define MSR_PSTATE_DEF_BASE 0xc0010064 /* base of Pstate MSRs */
+ #define MSR_PSTATE_STATUS 0xc0010063 /* Pstate Status MSR */
+ #define MSR_PSTATE_CTRL 0xc0010062 /* Pstate control MSR */
++#define MSR_PSTATE_CUR_LIMIT 0xc0010061 /* pstate current limit MSR */
+
+ /* define the two driver architectures */
+ #define CPU_OPTERON 0
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/cpu/mtrr/Makefile linux-2.6.18-xen-3.2.0/arch/i386/kernel/cpu/mtrr/Makefile
+--- linux-2.6.18.8/arch/i386/kernel/cpu/mtrr/Makefile 2007-02-23 15:52:30.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/cpu/mtrr/Makefile 2008-02-15 16:21:49.000000000 -0800
+@@ -3,3 +3,4 @@ obj-y += amd.o
+ obj-y += cyrix.o
+ obj-y += centaur.o
+
++obj-$(CONFIG_XEN) := main.o if.o
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/cpu/mtrr/main-xen.c linux-2.6.18-xen-3.2.0/arch/i386/kernel/cpu/mtrr/main-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/cpu/mtrr/main-xen.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/cpu/mtrr/main-xen.c 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,198 @@
++#include <linux/init.h>
++#include <linux/proc_fs.h>
++#include <linux/ctype.h>
++#include <linux/module.h>
++#include <linux/seq_file.h>
++#include <asm/uaccess.h>
++#include <linux/mutex.h>
++
++#include <asm/mtrr.h>
++#include "mtrr.h"
++
++static DEFINE_MUTEX(mtrr_mutex);
++
++void generic_get_mtrr(unsigned int reg, unsigned long *base,
++ unsigned int *size, mtrr_type * type)
++{
++ struct xen_platform_op op;
++
++ op.cmd = XENPF_read_memtype;
++ op.u.read_memtype.reg = reg;
++ if (unlikely(HYPERVISOR_platform_op(&op)))
++ memset(&op.u.read_memtype, 0, sizeof(op.u.read_memtype));
++
++ *size = op.u.read_memtype.nr_mfns;
++ *base = op.u.read_memtype.mfn;
++ *type = op.u.read_memtype.type;
++}
++
++struct mtrr_ops generic_mtrr_ops = {
++ .use_intel_if = 1,
++ .get = generic_get_mtrr,
++};
++
++struct mtrr_ops *mtrr_if = &generic_mtrr_ops;
++unsigned int num_var_ranges;
++unsigned int *usage_table;
++
++static void __init set_num_var_ranges(void)
++{
++ struct xen_platform_op op;
++
++ for (num_var_ranges = 0; ; num_var_ranges++) {
++ op.cmd = XENPF_read_memtype;
++ op.u.read_memtype.reg = num_var_ranges;
++ if (HYPERVISOR_platform_op(&op) != 0)
++ break;
++ }
++}
++
++static void __init init_table(void)
++{
++ int i, max;
++
++ max = num_var_ranges;
++ if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL))
++ == NULL) {
++ printk(KERN_ERR "mtrr: could not allocate\n");
++ return;
++ }
++ for (i = 0; i < max; i++)
++ usage_table[i] = 0;
++}
++
++int mtrr_add_page(unsigned long base, unsigned long size,
++ unsigned int type, char increment)
++{
++ int error;
++ struct xen_platform_op op;
++
++ mutex_lock(&mtrr_mutex);
++
++ op.cmd = XENPF_add_memtype;
++ op.u.add_memtype.mfn = base;
++ op.u.add_memtype.nr_mfns = size;
++ op.u.add_memtype.type = type;
++ error = HYPERVISOR_platform_op(&op);
++ if (error) {
++ mutex_unlock(&mtrr_mutex);
++ BUG_ON(error > 0);
++ return error;
++ }
++
++ if (increment)
++ ++usage_table[op.u.add_memtype.reg];
++
++ mutex_unlock(&mtrr_mutex);
++
++ return op.u.add_memtype.reg;
++}
++
++static int mtrr_check(unsigned long base, unsigned long size)
++{
++ if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
++ printk(KERN_WARNING
++ "mtrr: size and base must be multiples of 4 kiB\n");
++ printk(KERN_DEBUG
++ "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
++ dump_stack();
++ return -1;
++ }
++ return 0;
++}
++
++int
++mtrr_add(unsigned long base, unsigned long size, unsigned int type,
++ char increment)
++{
++ if (mtrr_check(base, size))
++ return -EINVAL;
++ return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
++ increment);
++}
++
++int mtrr_del_page(int reg, unsigned long base, unsigned long size)
++{
++ unsigned i;
++ mtrr_type ltype;
++ unsigned long lbase;
++ unsigned int lsize;
++ int error = -EINVAL;
++ struct xen_platform_op op;
++
++ mutex_lock(&mtrr_mutex);
++
++ if (reg < 0) {
++ /* Search for existing MTRR */
++ for (i = 0; i < num_var_ranges; ++i) {
++ mtrr_if->get(i, &lbase, &lsize, &ltype);
++ if (lbase == base && lsize == size) {
++ reg = i;
++ break;
++ }
++ }
++ if (reg < 0) {
++ printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
++ size);
++ goto out;
++ }
++ }
++ if (usage_table[reg] < 1) {
++ printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
++ goto out;
++ }
++ if (--usage_table[reg] < 1) {
++ op.cmd = XENPF_del_memtype;
++ op.u.del_memtype.handle = 0;
++ op.u.del_memtype.reg = reg;
++ error = HYPERVISOR_platform_op(&op);
++ if (error) {
++ BUG_ON(error > 0);
++ goto out;
++ }
++ }
++ error = reg;
++ out:
++ mutex_unlock(&mtrr_mutex);
++ return error;
++}
++
++int
++mtrr_del(int reg, unsigned long base, unsigned long size)
++{
++ if (mtrr_check(base, size))
++ return -EINVAL;
++ return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
++}
++
++EXPORT_SYMBOL(mtrr_add);
++EXPORT_SYMBOL(mtrr_del);
++
++void __init mtrr_bp_init(void)
++{
++}
++
++void mtrr_ap_init(void)
++{
++}
++
++static int __init mtrr_init(void)
++{
++ struct cpuinfo_x86 *c = &boot_cpu_data;
++
++ if (!is_initial_xendomain())
++ return -ENODEV;
++
++ if ((!cpu_has(c, X86_FEATURE_MTRR)) &&
++ (!cpu_has(c, X86_FEATURE_K6_MTRR)) &&
++ (!cpu_has(c, X86_FEATURE_CYRIX_ARR)) &&
++ (!cpu_has(c, X86_FEATURE_CENTAUR_MCR)))
++ return -ENODEV;
++
++ set_num_var_ranges();
++ init_table();
++
++ return 0;
++}
++
++subsys_initcall(mtrr_init);
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/crash.c linux-2.6.18-xen-3.2.0/arch/i386/kernel/crash.c
+--- linux-2.6.18.8/arch/i386/kernel/crash.c 2007-02-23 15:52:30.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/crash.c 2008-02-15 16:21:49.000000000 -0800
+@@ -90,6 +90,7 @@ static void crash_save_self(struct pt_re
+ crash_save_this_cpu(regs, cpu);
+ }
+
++#ifndef CONFIG_XEN
+ #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
+ static atomic_t waiting_for_crash_ipi;
+
+@@ -154,6 +155,7 @@ static void nmi_shootdown_cpus(void)
+ /* There are no cpus to shootdown */
+ }
+ #endif
++#endif /* CONFIG_XEN */
+
+ void machine_crash_shutdown(struct pt_regs *regs)
+ {
+@@ -170,10 +172,12 @@ void machine_crash_shutdown(struct pt_re
+
+ /* Make a note of crashing cpu. Will be used in NMI callback.*/
+ crashing_cpu = smp_processor_id();
++#ifndef CONFIG_XEN
+ nmi_shootdown_cpus();
+ lapic_shutdown();
+ #if defined(CONFIG_X86_IO_APIC)
+ disable_IO_APIC();
+ #endif
++#endif /* CONFIG_XEN */
+ crash_save_self(regs);
+ }
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/early_printk-xen.c linux-2.6.18-xen-3.2.0/arch/i386/kernel/early_printk-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/early_printk-xen.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/early_printk-xen.c 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,2 @@
++
++#include "../../x86_64/kernel/early_printk-xen.c"
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/entry-xen.S linux-2.6.18-xen-3.2.0/arch/i386/kernel/entry-xen.S
+--- linux-2.6.18.8/arch/i386/kernel/entry-xen.S 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/entry-xen.S 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,1238 @@
++/*
++ * linux/arch/i386/entry.S
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ */
++
++/*
++ * entry.S contains the system-call and fault low-level handling routines.
++ * This also contains the timer-interrupt handler, as well as all interrupts
++ * and faults that can result in a task-switch.
++ *
++ * NOTE: This code handles signal-recognition, which happens every time
++ * after a timer-interrupt and after each system call.
++ *
++ * I changed all the .align's to 4 (16 byte alignment), as that's faster
++ * on a 486.
++ *
++ * Stack layout in 'ret_from_system_call':
++ * ptrace needs to have all regs on the stack.
++ * if the order here is changed, it needs to be
++ * updated in fork.c:copy_process, signal.c:do_signal,
++ * ptrace.c and ptrace.h
++ *
++ * 0(%esp) - %ebx
++ * 4(%esp) - %ecx
++ * 8(%esp) - %edx
++ * C(%esp) - %esi
++ * 10(%esp) - %edi
++ * 14(%esp) - %ebp
++ * 18(%esp) - %eax
++ * 1C(%esp) - %ds
++ * 20(%esp) - %es
++ * 24(%esp) - orig_eax
++ * 28(%esp) - %eip
++ * 2C(%esp) - %cs
++ * 30(%esp) - %eflags
++ * 34(%esp) - %oldesp
++ * 38(%esp) - %oldss
++ *
++ * "current" is in register %ebx during any slow entries.
++ */
++
++#include <linux/linkage.h>
++#include <asm/thread_info.h>
++#include <asm/irqflags.h>
++#include <asm/errno.h>
++#include <asm/segment.h>
++#include <asm/smp.h>
++#include <asm/page.h>
++#include <asm/desc.h>
++#include <asm/dwarf2.h>
++#include "irq_vectors.h"
++#include <xen/interface/xen.h>
++
++#define nr_syscalls ((syscall_table_size)/4)
++
++EBX = 0x00
++ECX = 0x04
++EDX = 0x08
++ESI = 0x0C
++EDI = 0x10
++EBP = 0x14
++EAX = 0x18
++DS = 0x1C
++ES = 0x20
++ORIG_EAX = 0x24
++EIP = 0x28
++CS = 0x2C
++EFLAGS = 0x30
++OLDESP = 0x34
++OLDSS = 0x38
++
++CF_MASK = 0x00000001
++TF_MASK = 0x00000100
++IF_MASK = 0x00000200
++DF_MASK = 0x00000400
++NT_MASK = 0x00004000
++VM_MASK = 0x00020000
++/* Pseudo-eflags. */
++NMI_MASK = 0x80000000
++
++#ifndef CONFIG_XEN
++#define DISABLE_INTERRUPTS cli
++#define ENABLE_INTERRUPTS sti
++#else
++/* Offsets into shared_info_t. */
++#define evtchn_upcall_pending /* 0 */
++#define evtchn_upcall_mask 1
++
++#define sizeof_vcpu_shift 6
++
++#ifdef CONFIG_SMP
++#define GET_VCPU_INFO movl TI_cpu(%ebp),%esi ; \
++ shl $sizeof_vcpu_shift,%esi ; \
++ addl HYPERVISOR_shared_info,%esi
++#else
++#define GET_VCPU_INFO movl HYPERVISOR_shared_info,%esi
++#endif
++
++#define __DISABLE_INTERRUPTS movb $1,evtchn_upcall_mask(%esi)
++#define __ENABLE_INTERRUPTS movb $0,evtchn_upcall_mask(%esi)
++#define DISABLE_INTERRUPTS GET_VCPU_INFO ; \
++ __DISABLE_INTERRUPTS
++#define ENABLE_INTERRUPTS GET_VCPU_INFO ; \
++ __ENABLE_INTERRUPTS
++#define __TEST_PENDING testb $0xFF,evtchn_upcall_pending(%esi)
++#endif
++
++#ifdef CONFIG_PREEMPT
++#define preempt_stop cli; TRACE_IRQS_OFF
++#else
++#define preempt_stop
++#define resume_kernel restore_nocheck
++#endif
++
++.macro TRACE_IRQS_IRET
++#ifdef CONFIG_TRACE_IRQFLAGS
++ testl $IF_MASK,EFLAGS(%esp) # interrupts off?
++ jz 1f
++ TRACE_IRQS_ON
++1:
++#endif
++.endm
++
++#ifdef CONFIG_VM86
++#define resume_userspace_sig check_userspace
++#else
++#define resume_userspace_sig resume_userspace
++#endif
++
++#define SAVE_ALL \
++ cld; \
++ pushl %es; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ /*CFI_REL_OFFSET es, 0;*/\
++ pushl %ds; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ /*CFI_REL_OFFSET ds, 0;*/\
++ pushl %eax; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET eax, 0;\
++ pushl %ebp; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET ebp, 0;\
++ pushl %edi; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET edi, 0;\
++ pushl %esi; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET esi, 0;\
++ pushl %edx; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET edx, 0;\
++ pushl %ecx; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET ecx, 0;\
++ pushl %ebx; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET ebx, 0;\
++ movl $(__USER_DS), %edx; \
++ movl %edx, %ds; \
++ movl %edx, %es;
++
++#define RESTORE_INT_REGS \
++ popl %ebx; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE ebx;\
++ popl %ecx; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE ecx;\
++ popl %edx; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE edx;\
++ popl %esi; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE esi;\
++ popl %edi; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE edi;\
++ popl %ebp; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE ebp;\
++ popl %eax; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE eax
++
++#define RESTORE_REGS \
++ RESTORE_INT_REGS; \
++1: popl %ds; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ /*CFI_RESTORE ds;*/\
++2: popl %es; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ /*CFI_RESTORE es;*/\
++.section .fixup,"ax"; \
++3: movl $0,(%esp); \
++ jmp 1b; \
++4: movl $0,(%esp); \
++ jmp 2b; \
++.previous; \
++.section __ex_table,"a";\
++ .align 4; \
++ .long 1b,3b; \
++ .long 2b,4b; \
++.previous
++
++#define RING0_INT_FRAME \
++ CFI_STARTPROC simple;\
++ CFI_DEF_CFA esp, 3*4;\
++ /*CFI_OFFSET cs, -2*4;*/\
++ CFI_OFFSET eip, -3*4
++
++#define RING0_EC_FRAME \
++ CFI_STARTPROC simple;\
++ CFI_DEF_CFA esp, 4*4;\
++ /*CFI_OFFSET cs, -2*4;*/\
++ CFI_OFFSET eip, -3*4
++
++#define RING0_PTREGS_FRAME \
++ CFI_STARTPROC simple;\
++ CFI_DEF_CFA esp, OLDESP-EBX;\
++ /*CFI_OFFSET cs, CS-OLDESP;*/\
++ CFI_OFFSET eip, EIP-OLDESP;\
++ /*CFI_OFFSET es, ES-OLDESP;*/\
++ /*CFI_OFFSET ds, DS-OLDESP;*/\
++ CFI_OFFSET eax, EAX-OLDESP;\
++ CFI_OFFSET ebp, EBP-OLDESP;\
++ CFI_OFFSET edi, EDI-OLDESP;\
++ CFI_OFFSET esi, ESI-OLDESP;\
++ CFI_OFFSET edx, EDX-OLDESP;\
++ CFI_OFFSET ecx, ECX-OLDESP;\
++ CFI_OFFSET ebx, EBX-OLDESP
++
++ENTRY(ret_from_fork)
++ CFI_STARTPROC
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ call schedule_tail
++ GET_THREAD_INFO(%ebp)
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++ pushl $0x0202 # Reset kernel eflags
++ CFI_ADJUST_CFA_OFFSET 4
++ popfl
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp syscall_exit
++ CFI_ENDPROC
++
++/*
++ * Return to user mode is not as complex as all this looks,
++ * but we want the default path for a system call return to
++ * go as quickly as possible which is why some of this is
++ * less clear than it otherwise should be.
++ */
++
++ # userspace resumption stub bypassing syscall exit tracing
++ ALIGN
++ RING0_PTREGS_FRAME
++ret_from_exception:
++ preempt_stop
++ret_from_intr:
++ GET_THREAD_INFO(%ebp)
++check_userspace:
++ movl EFLAGS(%esp), %eax # mix EFLAGS and CS
++ movb CS(%esp), %al
++ testl $(VM_MASK | 2), %eax
++ jz resume_kernel
++ENTRY(resume_userspace)
++ DISABLE_INTERRUPTS # make sure we don't miss an interrupt
++ # setting need_resched or sigpending
++ # between sampling and the iret
++ movl TI_flags(%ebp), %ecx
++ andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
++ # int/exception return?
++ jne work_pending
++ jmp restore_all
++
++#ifdef CONFIG_PREEMPT
++ENTRY(resume_kernel)
++ cli
++ cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
++ jnz restore_nocheck
++need_resched:
++ movl TI_flags(%ebp), %ecx # need_resched set ?
++ testb $_TIF_NEED_RESCHED, %cl
++ jz restore_all
++ testl $IF_MASK,EFLAGS(%esp) # interrupts off (exception path) ?
++ jz restore_all
++ call preempt_schedule_irq
++ jmp need_resched
++#endif
++ CFI_ENDPROC
++
++/* SYSENTER_RETURN points to after the "sysenter" instruction in
++ the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */
++
++ # sysenter call handler stub
++ENTRY(sysenter_entry)
++ CFI_STARTPROC simple
++ CFI_DEF_CFA esp, 0
++ CFI_REGISTER esp, ebp
++ movl SYSENTER_stack_esp0(%esp),%esp
++sysenter_past_esp:
++ /*
++ * No need to follow this irqs on/off section: the syscall
++ * disabled irqs and here we enable it straight after entry:
++ */
++ sti
++ pushl $(__USER_DS)
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET ss, 0*/
++ pushl %ebp
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET esp, 0
++ pushfl
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $(__USER_CS)
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET cs, 0*/
++ /*
++ * Push current_thread_info()->sysenter_return to the stack.
++ * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
++ * pushed above; +8 corresponds to copy_thread's esp0 setting.
++ */
++ pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET eip, 0
++
++/*
++ * Load the potential sixth argument from user stack.
++ * Careful about security.
++ */
++ cmpl $__PAGE_OFFSET-3,%ebp
++ jae syscall_fault
++1: movl (%ebp),%ebp
++.section __ex_table,"a"
++ .align 4
++ .long 1b,syscall_fault
++.previous
++
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++
++ /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
++ testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
++ jnz syscall_trace_entry
++ cmpl $(nr_syscalls), %eax
++ jae syscall_badsys
++ call *sys_call_table(,%eax,4)
++ movl %eax,EAX(%esp)
++ DISABLE_INTERRUPTS
++ TRACE_IRQS_OFF
++ movl TI_flags(%ebp), %ecx
++ testw $_TIF_ALLWORK_MASK, %cx
++ jne syscall_exit_work
++/* if something modifies registers it must also disable sysexit */
++ movl EIP(%esp), %edx
++ movl OLDESP(%esp), %ecx
++ xorl %ebp,%ebp
++#ifdef CONFIG_XEN
++ TRACE_IRQS_ON
++ __ENABLE_INTERRUPTS
++sysexit_scrit: /**** START OF SYSEXIT CRITICAL REGION ****/
++ __TEST_PENDING
++ jnz 14f # process more events if necessary...
++ movl ESI(%esp), %esi
++ sysexit
++14: __DISABLE_INTERRUPTS
++ TRACE_IRQS_OFF
++sysexit_ecrit: /**** END OF SYSEXIT CRITICAL REGION ****/
++ push %esp
++ call evtchn_do_upcall
++ add $4,%esp
++ jmp ret_from_intr
++#else
++ TRACE_IRQS_ON
++ sti
++ sysexit
++#endif /* !CONFIG_XEN */
++ CFI_ENDPROC
++
++ # pv sysenter call handler stub
++ENTRY(sysenter_entry_pv)
++ RING0_INT_FRAME
++ movl $__USER_DS,16(%esp)
++ movl %ebp,12(%esp)
++ movl $__USER_CS,4(%esp)
++ addl $4,%esp
++ /* +5*4 is SS:ESP,EFLAGS,CS:EIP. +8 is esp0 setting. */
++ pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
++/*
++ * Load the potential sixth argument from user stack.
++ * Careful about security.
++ */
++ cmpl $__PAGE_OFFSET-3,%ebp
++ jae syscall_fault
++1: movl (%ebp),%ebp
++.section __ex_table,"a"
++ .align 4
++ .long 1b,syscall_fault
++.previous
++ /* fall through */
++ CFI_ENDPROC
++ENDPROC(sysenter_entry_pv)
++
++ # system call handler stub
++ENTRY(system_call)
++ RING0_INT_FRAME # can't unwind into user space anyway
++ pushl %eax # save orig_eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++ testl $TF_MASK,EFLAGS(%esp)
++ jz no_singlestep
++ orl $_TIF_SINGLESTEP,TI_flags(%ebp)
++no_singlestep:
++ # system call tracing in operation / emulation
++ /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
++ testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
++ jnz syscall_trace_entry
++ cmpl $(nr_syscalls), %eax
++ jae syscall_badsys
++syscall_call:
++ call *sys_call_table(,%eax,4)
++ movl %eax,EAX(%esp) # store the return value
++syscall_exit:
++ DISABLE_INTERRUPTS # make sure we don't miss an interrupt
++ # setting need_resched or sigpending
++ # between sampling and the iret
++ TRACE_IRQS_OFF
++ movl TI_flags(%ebp), %ecx
++ testw $_TIF_ALLWORK_MASK, %cx # current->work
++ jne syscall_exit_work
++
++restore_all:
++#ifndef CONFIG_XEN
++ movl EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
++ # Warning: OLDSS(%esp) contains the wrong/random values if we
++ # are returning to the kernel.
++ # See comments in process.c:copy_thread() for details.
++ movb OLDSS(%esp), %ah
++ movb CS(%esp), %al
++ andl $(VM_MASK | (4 << 8) | 3), %eax
++ cmpl $((4 << 8) | 3), %eax
++ CFI_REMEMBER_STATE
++ je ldt_ss # returning to user-space with LDT SS
++restore_nocheck:
++#else
++restore_nocheck:
++ movl EFLAGS(%esp), %eax
++ testl $(VM_MASK|NMI_MASK), %eax
++ CFI_REMEMBER_STATE
++ jnz hypervisor_iret
++ shr $9, %eax # EAX[0] == IRET_EFLAGS.IF
++ GET_VCPU_INFO
++ andb evtchn_upcall_mask(%esi),%al
++ andb $1,%al # EAX[0] == IRET_EFLAGS.IF & event_mask
++ CFI_REMEMBER_STATE
++ jnz restore_all_enable_events # != 0 => enable event delivery
++#endif
++ TRACE_IRQS_IRET
++restore_nocheck_notrace:
++ RESTORE_REGS
++ addl $4, %esp
++ CFI_ADJUST_CFA_OFFSET -4
++1: iret
++.section .fixup,"ax"
++iret_exc:
++#ifndef CONFIG_XEN
++ TRACE_IRQS_ON
++ sti
++#endif
++ pushl $0 # no error code
++ pushl $do_iret_error
++ jmp error_code
++.previous
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++
++ CFI_RESTORE_STATE
++#ifndef CONFIG_XEN
++ldt_ss:
++ larl OLDSS(%esp), %eax
++ jnz restore_nocheck
++ testl $0x00400000, %eax # returning to 32bit stack?
++	jnz restore_nocheck	# all right, normal return
++ /* If returning to userspace with 16bit stack,
++ * try to fix the higher word of ESP, as the CPU
++ * won't restore it.
++ * This is an "official" bug of all the x86-compatible
++ * CPUs, which we can try to work around to make
++ * dosemu and wine happy. */
++ subl $8, %esp # reserve space for switch16 pointer
++ CFI_ADJUST_CFA_OFFSET 8
++ cli
++ TRACE_IRQS_OFF
++ movl %esp, %eax
++ /* Set up the 16bit stack frame with switch32 pointer on top,
++ * and a switch16 pointer on top of the current frame. */
++ call setup_x86_bogus_stack
++ CFI_ADJUST_CFA_OFFSET -8 # frame has moved
++ TRACE_IRQS_IRET
++ RESTORE_REGS
++ lss 20+4(%esp), %esp # switch to 16bit stack
++1: iret
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++#else
++ ALIGN
++restore_all_enable_events:
++ TRACE_IRQS_ON
++ __ENABLE_INTERRUPTS
++scrit: /**** START OF CRITICAL REGION ****/
++ __TEST_PENDING
++ jnz 14f # process more events if necessary...
++ RESTORE_REGS
++ addl $4, %esp
++ CFI_ADJUST_CFA_OFFSET -4
++1: iret
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++14: __DISABLE_INTERRUPTS
++ TRACE_IRQS_OFF
++ jmp 11f
++ecrit: /**** END OF CRITICAL REGION ****/
++
++ CFI_RESTORE_STATE
++hypervisor_iret:
++ andl $~NMI_MASK, EFLAGS(%esp)
++ RESTORE_REGS
++ addl $4, %esp
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp hypercall_page + (__HYPERVISOR_iret * 32)
++#endif
++ CFI_ENDPROC
++
++ # perform work that needs to be done immediately before resumption
++ ALIGN
++ RING0_PTREGS_FRAME # can't unwind into user space anyway
++work_pending:
++ testb $_TIF_NEED_RESCHED, %cl
++ jz work_notifysig
++work_resched:
++ call schedule
++ DISABLE_INTERRUPTS # make sure we don't miss an interrupt
++ # setting need_resched or sigpending
++ # between sampling and the iret
++ TRACE_IRQS_OFF
++ movl TI_flags(%ebp), %ecx
++ andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
++ # than syscall tracing?
++ jz restore_all
++ testb $_TIF_NEED_RESCHED, %cl
++ jnz work_resched
++
++work_notifysig: # deal with pending signals and
++ # notify-resume requests
++ testl $VM_MASK, EFLAGS(%esp)
++ movl %esp, %eax
++ jne work_notifysig_v86 # returning to kernel-space or
++ # vm86-space
++ xorl %edx, %edx
++ call do_notify_resume
++ jmp resume_userspace_sig
++
++ ALIGN
++work_notifysig_v86:
++#ifdef CONFIG_VM86
++ pushl %ecx # save ti_flags for do_notify_resume
++ CFI_ADJUST_CFA_OFFSET 4
++ call save_v86_state # %eax contains pt_regs pointer
++ popl %ecx
++ CFI_ADJUST_CFA_OFFSET -4
++ movl %eax, %esp
++ xorl %edx, %edx
++ call do_notify_resume
++ jmp resume_userspace_sig
++#endif
++
++ # perform syscall exit tracing
++ ALIGN
++syscall_trace_entry:
++ movl $-ENOSYS,EAX(%esp)
++ movl %esp, %eax
++ xorl %edx,%edx
++ call do_syscall_trace
++ cmpl $0, %eax
++ jne resume_userspace # ret != 0 -> running under PTRACE_SYSEMU,
++ # so must skip actual syscall
++ movl ORIG_EAX(%esp), %eax
++ cmpl $(nr_syscalls), %eax
++ jnae syscall_call
++ jmp syscall_exit
++
++ # perform syscall exit tracing
++ ALIGN
++syscall_exit_work:
++ testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
++ jz work_pending
++ TRACE_IRQS_ON
++ ENABLE_INTERRUPTS # could let do_syscall_trace() call
++ # schedule() instead
++ movl %esp, %eax
++ movl $1, %edx
++ call do_syscall_trace
++ jmp resume_userspace
++ CFI_ENDPROC
++
++ RING0_INT_FRAME # can't unwind into user space anyway
++syscall_fault:
++ pushl %eax # save orig_eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++ movl $-EFAULT,EAX(%esp)
++ jmp resume_userspace
++
++syscall_badsys:
++ movl $-ENOSYS,EAX(%esp)
++ jmp resume_userspace
++ CFI_ENDPROC
++
++#ifndef CONFIG_XEN
++#define FIXUP_ESPFIX_STACK \
++ movl %esp, %eax; \
++ /* switch to 32bit stack using the pointer on top of 16bit stack */ \
++ lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
++ /* copy data from 16bit stack to 32bit stack */ \
++ call fixup_x86_bogus_stack; \
++ /* put ESP to the proper location */ \
++ movl %eax, %esp;
++#define UNWIND_ESPFIX_STACK \
++ pushl %eax; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ movl %ss, %eax; \
++ /* see if on 16bit stack */ \
++ cmpw $__ESPFIX_SS, %ax; \
++ je 28f; \
++27: popl %eax; \
++ CFI_ADJUST_CFA_OFFSET -4; \
++.section .fixup,"ax"; \
++28: movl $__KERNEL_DS, %eax; \
++ movl %eax, %ds; \
++ movl %eax, %es; \
++ /* switch to 32bit stack */ \
++ FIXUP_ESPFIX_STACK; \
++ jmp 27b; \
++.previous
++
++/*
++ * Build the entry stubs and pointer table with
++ * some assembler magic.
++ */
++.data
++ENTRY(interrupt)
++.text
++
++vector=0
++ENTRY(irq_entries_start)
++ RING0_INT_FRAME
++.rept NR_IRQS
++ ALIGN
++ .if vector
++ CFI_ADJUST_CFA_OFFSET -4
++ .endif
++1: pushl $~(vector)
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp common_interrupt
++.data
++ .long 1b
++.text
++vector=vector+1
++.endr
++
++/*
++ * the CPU automatically disables interrupts when executing an IRQ vector,
++ * so IRQ-flags tracing has to follow that:
++ */
++ ALIGN
++common_interrupt:
++ SAVE_ALL
++ TRACE_IRQS_OFF
++ movl %esp,%eax
++ call do_IRQ
++ jmp ret_from_intr
++ CFI_ENDPROC
++
++#define BUILD_INTERRUPT(name, nr) \
++ENTRY(name) \
++ RING0_INT_FRAME; \
++ pushl $~(nr); \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ SAVE_ALL; \
++ TRACE_IRQS_OFF \
++ movl %esp,%eax; \
++ call smp_/**/name; \
++ jmp ret_from_intr; \
++ CFI_ENDPROC
++
++/* The include is where all of the SMP etc. interrupts come from */
++#include "entry_arch.h"
++#else
++#define UNWIND_ESPFIX_STACK
++#endif
++
++ENTRY(divide_error)
++ RING0_INT_FRAME
++ pushl $0 # no error code
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_divide_error
++ CFI_ADJUST_CFA_OFFSET 4
++ ALIGN
++error_code:
++ pushl %ds
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET ds, 0*/
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET eax, 0
++ xorl %eax, %eax
++ pushl %ebp
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET ebp, 0
++ pushl %edi
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET edi, 0
++ pushl %esi
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET esi, 0
++ pushl %edx
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET edx, 0
++ decl %eax # eax = -1
++ pushl %ecx
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET ecx, 0
++ pushl %ebx
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET ebx, 0
++ cld
++ pushl %es
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET es, 0*/
++ UNWIND_ESPFIX_STACK
++ popl %ecx
++ CFI_ADJUST_CFA_OFFSET -4
++ /*CFI_REGISTER es, ecx*/
++ movl ES(%esp), %edi # get the function address
++ movl ORIG_EAX(%esp), %edx # get the error code
++ movl %eax, ORIG_EAX(%esp)
++ movl %ecx, ES(%esp)
++ /*CFI_REL_OFFSET es, ES*/
++ movl $(__USER_DS), %ecx
++ movl %ecx, %ds
++ movl %ecx, %es
++ movl %esp,%eax # pt_regs pointer
++ call *%edi
++ jmp ret_from_exception
++ CFI_ENDPROC
++
++#ifdef CONFIG_XEN
++# A note on the "critical region" in our callback handler.
++# We want to avoid stacking callback handlers due to events occurring
++# during handling of the last event. To do this, we keep events disabled
++# until we've done all processing. HOWEVER, we must enable events before
++# popping the stack frame (can't be done atomically) and so it would still
++# be possible to get enough handler activations to overflow the stack.
++# Although unlikely, bugs of that kind are hard to track down, so we'd
++# like to avoid the possibility.
++# So, on entry to the handler we detect whether we interrupted an
++# existing activation in its critical region -- if so, we pop the current
++# activation and restart the handler using the previous one.
++#
++# The sysexit critical region is slightly different. sysexit
++# atomically removes the entire stack frame. If we interrupt in the
++# critical region we know that the entire frame is present and correct
++# so we can simply throw away the new one.
++ENTRY(hypervisor_callback)
++ RING0_INT_FRAME
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ movl EIP(%esp),%eax
++ cmpl $scrit,%eax
++ jb 11f
++ cmpl $ecrit,%eax
++ jb critical_region_fixup
++ cmpl $sysexit_scrit,%eax
++ jb 11f
++ cmpl $sysexit_ecrit,%eax
++ ja 11f
++ addl $OLDESP,%esp # Remove eflags...ebx from stack frame.
++11: push %esp
++ CFI_ADJUST_CFA_OFFSET 4
++ call evtchn_do_upcall
++ add $4,%esp
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp ret_from_intr
++ CFI_ENDPROC
++
++# [How we do the fixup]. We want to merge the current stack frame with the
++# just-interrupted frame. How we do this depends on where in the critical
++# region the interrupted handler was executing, and so how many saved
++# registers are in each frame. We do this quickly using the lookup table
++# 'critical_fixup_table'. For each byte offset in the critical region, it
++# provides the number of bytes which have already been popped from the
++# interrupted stack frame.
++critical_region_fixup:
++ movzbl critical_fixup_table-scrit(%eax),%ecx # %eax contains num bytes popped
++ cmpb $0xff,%cl # 0xff => vcpu_info critical region
++ jne 15f
++ xorl %ecx,%ecx
++15: leal (%esp,%ecx),%esi # %esi points at end of src region
++ leal OLDESP(%esp),%edi # %edi points at end of dst region
++ shrl $2,%ecx # convert words to bytes
++ je 17f # skip loop if nothing to copy
++16: subl $4,%esi # pre-decrementing copy loop
++ subl $4,%edi
++ movl (%esi),%eax
++ movl %eax,(%edi)
++ loop 16b
++17: movl %edi,%esp # final %edi is top of merged stack
++ jmp 11b
++
++.section .rodata,"a"
++critical_fixup_table:
++ .byte 0xff,0xff,0xff # testb $0xff,(%esi) = __TEST_PENDING
++ .byte 0xff,0xff # jnz 14f
++ .byte 0x00 # pop %ebx
++ .byte 0x04 # pop %ecx
++ .byte 0x08 # pop %edx
++ .byte 0x0c # pop %esi
++ .byte 0x10 # pop %edi
++ .byte 0x14 # pop %ebp
++ .byte 0x18 # pop %eax
++ .byte 0x1c # pop %ds
++ .byte 0x20 # pop %es
++ .byte 0x24,0x24,0x24 # add $4,%esp
++ .byte 0x28 # iret
++ .byte 0xff,0xff,0xff,0xff # movb $1,1(%esi)
++ .byte 0x00,0x00 # jmp 11b
++.previous
++
++# Hypervisor uses this for application faults while it executes.
++# We get here for two reasons:
++# 1. Fault while reloading DS, ES, FS or GS
++# 2. Fault while executing IRET
++# Category 1 we fix up by reattempting the load, and zeroing the segment
++# register if the load fails.
++# Category 2 we fix up by jumping to do_iret_error. We cannot use the
++# normal Linux return path in this case because if we use the IRET hypercall
++# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
++# We distinguish between categories by maintaining a status value in EAX.
++ENTRY(failsafe_callback)
++ pushl %eax
++ movl $1,%eax
++1: mov 4(%esp),%ds
++2: mov 8(%esp),%es
++3: mov 12(%esp),%fs
++4: mov 16(%esp),%gs
++ testl %eax,%eax
++ popl %eax
++ jz 5f
++ addl $16,%esp # EAX != 0 => Category 2 (Bad IRET)
++ jmp iret_exc
++5: addl $16,%esp # EAX == 0 => Category 1 (Bad segment)
++ RING0_INT_FRAME
++ pushl $0
++ SAVE_ALL
++ jmp ret_from_exception
++.section .fixup,"ax"; \
++6: xorl %eax,%eax; \
++ movl %eax,4(%esp); \
++ jmp 1b; \
++7: xorl %eax,%eax; \
++ movl %eax,8(%esp); \
++ jmp 2b; \
++8: xorl %eax,%eax; \
++ movl %eax,12(%esp); \
++ jmp 3b; \
++9: xorl %eax,%eax; \
++ movl %eax,16(%esp); \
++ jmp 4b; \
++.previous; \
++.section __ex_table,"a"; \
++ .align 4; \
++ .long 1b,6b; \
++ .long 2b,7b; \
++ .long 3b,8b; \
++ .long 4b,9b; \
++.previous
++#endif
++ CFI_ENDPROC
++
++ENTRY(coprocessor_error)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_coprocessor_error
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(simd_coprocessor_error)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_simd_coprocessor_error
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(device_not_available)
++ RING0_INT_FRAME
++ pushl $-1 # mark this as an int
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++#ifndef CONFIG_XEN
++ movl %cr0, %eax
++ testl $0x4, %eax # EM (math emulation bit)
++ je device_available_emulate
++ pushl $0 # temporary storage for ORIG_EIP
++ CFI_ADJUST_CFA_OFFSET 4
++ call math_emulate
++ addl $4, %esp
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp ret_from_exception
++device_available_emulate:
++#endif
++ preempt_stop
++ call math_state_restore
++ jmp ret_from_exception
++ CFI_ENDPROC
++
++#ifndef CONFIG_XEN
++/*
++ * Debug traps and NMI can happen at the one SYSENTER instruction
++ * that sets up the real kernel stack. Check here, since we can't
++ * allow the wrong stack to be used.
++ *
++ * "SYSENTER_stack_esp0+12" is because the NMI/debug handler will have
++ * already pushed 3 words if it hits on the sysenter instruction:
++ * eflags, cs and eip.
++ *
++ * We just load the right stack, and push the three (known) values
++ * by hand onto the new stack - while updating the return eip past
++ * the instruction that would have done it for sysenter.
++ */
++#define FIX_STACK(offset, ok, label) \
++ cmpw $__KERNEL_CS,4(%esp); \
++ jne ok; \
++label: \
++ movl SYSENTER_stack_esp0+offset(%esp),%esp; \
++ pushfl; \
++ pushl $__KERNEL_CS; \
++ pushl $sysenter_past_esp
++#endif /* CONFIG_XEN */
++
++KPROBE_ENTRY(debug)
++ RING0_INT_FRAME
++#ifndef CONFIG_XEN
++ cmpl $sysenter_entry,(%esp)
++ jne debug_stack_correct
++ FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
++debug_stack_correct:
++#endif /* !CONFIG_XEN */
++ pushl $-1 # mark this as an int
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # error code 0
++ movl %esp,%eax # pt_regs pointer
++ call do_debug
++ jmp ret_from_exception
++ CFI_ENDPROC
++ .previous .text
++#ifndef CONFIG_XEN
++/*
++ * NMI is doubly nasty. It can happen _while_ we're handling
++ * a debug fault, and the debug fault hasn't yet been able to
++ * clear up the stack. So we first check whether we got an
++ * NMI on the sysenter entry path, but after that we need to
++ * check whether we got an NMI on the debug path where the debug
++ * fault happened on the sysenter path.
++ */
++ENTRY(nmi)
++ RING0_INT_FRAME
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ movl %ss, %eax
++ cmpw $__ESPFIX_SS, %ax
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++ je nmi_16bit_stack
++ cmpl $sysenter_entry,(%esp)
++ je nmi_stack_fixup
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ movl %esp,%eax
++ /* Do not access memory above the end of our stack page,
++ * it might not exist.
++ */
++ andl $(THREAD_SIZE-1),%eax
++ cmpl $(THREAD_SIZE-20),%eax
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++ jae nmi_stack_correct
++ cmpl $sysenter_entry,12(%esp)
++ je nmi_debug_stack_check
++nmi_stack_correct:
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # zero error code
++ movl %esp,%eax # pt_regs pointer
++ call do_nmi
++ jmp restore_nocheck_notrace
++ CFI_ENDPROC
++
++nmi_stack_fixup:
++ FIX_STACK(12,nmi_stack_correct, 1)
++ jmp nmi_stack_correct
++nmi_debug_stack_check:
++ cmpw $__KERNEL_CS,16(%esp)
++ jne nmi_stack_correct
++ cmpl $debug,(%esp)
++ jb nmi_stack_correct
++ cmpl $debug_esp_fix_insn,(%esp)
++ ja nmi_stack_correct
++ FIX_STACK(24,nmi_stack_correct, 1)
++ jmp nmi_stack_correct
++
++nmi_16bit_stack:
++ RING0_INT_FRAME
++ /* create the pointer to lss back */
++ pushl %ss
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl %esp
++ CFI_ADJUST_CFA_OFFSET 4
++ movzwl %sp, %esp
++ addw $4, (%esp)
++ /* copy the iret frame of 12 bytes */
++ .rept 3
++ pushl 16(%esp)
++ CFI_ADJUST_CFA_OFFSET 4
++ .endr
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ FIXUP_ESPFIX_STACK # %eax == %esp
++ CFI_ADJUST_CFA_OFFSET -20 # the frame has now moved
++ xorl %edx,%edx # zero error code
++ call do_nmi
++ RESTORE_REGS
++ lss 12+4(%esp), %esp # back to 16bit stack
++1: iret
++ CFI_ENDPROC
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++#else
++ENTRY(nmi)
++ RING0_INT_FRAME
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # zero error code
++ movl %esp,%eax # pt_regs pointer
++ call do_nmi
++ orl $NMI_MASK, EFLAGS(%esp)
++ jmp restore_all
++ CFI_ENDPROC
++#endif
++
++KPROBE_ENTRY(int3)
++ RING0_INT_FRAME
++ pushl $-1 # mark this as an int
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # zero error code
++ movl %esp,%eax # pt_regs pointer
++ call do_int3
++ jmp ret_from_exception
++ CFI_ENDPROC
++ .previous .text
++
++ENTRY(overflow)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_overflow
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(bounds)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_bounds
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(invalid_op)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_invalid_op
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(coprocessor_segment_overrun)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_coprocessor_segment_overrun
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(invalid_TSS)
++ RING0_EC_FRAME
++ pushl $do_invalid_TSS
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(segment_not_present)
++ RING0_EC_FRAME
++ pushl $do_segment_not_present
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(stack_segment)
++ RING0_EC_FRAME
++ pushl $do_stack_segment
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++KPROBE_ENTRY(general_protection)
++ RING0_EC_FRAME
++ pushl $do_general_protection
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++ .previous .text
++
++ENTRY(alignment_check)
++ RING0_EC_FRAME
++ pushl $do_alignment_check
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++KPROBE_ENTRY(page_fault)
++ RING0_EC_FRAME
++ pushl $do_page_fault
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++ .previous .text
++
++#ifdef CONFIG_X86_MCE
++ENTRY(machine_check)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl machine_check_vector
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++#endif
++
++#ifndef CONFIG_XEN
++ENTRY(spurious_interrupt_bug)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_spurious_interrupt_bug
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++#endif /* !CONFIG_XEN */
++
++#ifdef CONFIG_STACK_UNWIND
++ENTRY(arch_unwind_init_running)
++ CFI_STARTPROC
++ movl 4(%esp), %edx
++ movl (%esp), %ecx
++ leal 4(%esp), %eax
++ movl %ebx, EBX(%edx)
++ xorl %ebx, %ebx
++ movl %ebx, ECX(%edx)
++ movl %ebx, EDX(%edx)
++ movl %esi, ESI(%edx)
++ movl %edi, EDI(%edx)
++ movl %ebp, EBP(%edx)
++ movl %ebx, EAX(%edx)
++ movl $__USER_DS, DS(%edx)
++ movl $__USER_DS, ES(%edx)
++ movl %ebx, ORIG_EAX(%edx)
++ movl %ecx, EIP(%edx)
++ movl 12(%esp), %ecx
++ movl $__KERNEL_CS, CS(%edx)
++ movl %ebx, EFLAGS(%edx)
++ movl %eax, OLDESP(%edx)
++ movl 8(%esp), %eax
++ movl %ecx, 8(%esp)
++ movl EBX(%edx), %ebx
++ movl $__KERNEL_DS, OLDSS(%edx)
++ jmpl *%eax
++ CFI_ENDPROC
++ENDPROC(arch_unwind_init_running)
++#endif
++
++ENTRY(fixup_4gb_segment)
++ RING0_EC_FRAME
++ pushl $do_fixup_4gb_segment
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++.section .rodata,"a"
++#include "syscall_table.S"
++
++syscall_table_size=(.-sys_call_table)
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/entry.S linux-2.6.18-xen-3.2.0/arch/i386/kernel/entry.S
+--- linux-2.6.18.8/arch/i386/kernel/entry.S 2007-02-23 15:52:30.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/entry.S 2008-02-15 16:21:49.000000000 -0800
+@@ -269,7 +269,7 @@ ENTRY(sysenter_entry)
+ CFI_STARTPROC simple
+ CFI_DEF_CFA esp, 0
+ CFI_REGISTER esp, ebp
+- movl TSS_sysenter_esp0(%esp),%esp
++ movl SYSENTER_stack_esp0(%esp),%esp
+ sysenter_past_esp:
+ /*
+ * No need to follow this irqs on/off section: the syscall
+@@ -689,7 +689,7 @@ device_not_available_emulate:
+ * that sets up the real kernel stack. Check here, since we can't
+ * allow the wrong stack to be used.
+ *
+- * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
++ * "SYSENTER_stack_esp0+12" is because the NMI/debug handler will have
+ * already pushed 3 words if it hits on the sysenter instruction:
+ * eflags, cs and eip.
+ *
+@@ -701,7 +701,7 @@ device_not_available_emulate:
+ cmpw $__KERNEL_CS,4(%esp); \
+ jne ok; \
+ label: \
+- movl TSS_sysenter_esp0+offset(%esp),%esp; \
++ movl SYSENTER_stack_esp0+offset(%esp),%esp; \
+ pushfl; \
+ pushl $__KERNEL_CS; \
+ pushl $sysenter_past_esp
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/fixup.c linux-2.6.18-xen-3.2.0/arch/i386/kernel/fixup.c
+--- linux-2.6.18.8/arch/i386/kernel/fixup.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/fixup.c 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,88 @@
++/******************************************************************************
++ * fixup.c
++ *
++ * Binary-rewriting of certain IA32 instructions, on notification by Xen.
++ * Used to avoid repeated slow emulation of common instructions used by the
++ * user-space TLS (Thread-Local Storage) libraries.
++ *
++ * **** NOTE ****
++ * Issues with the binary rewriting have caused it to be removed. Instead
++ * we rely on Xen's emulator to boot the kernel, and then print a banner
++ * message recommending that the user disables /lib/tls.
++ *
++ * Copyright (c) 2004, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#include <linux/init.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/kernel.h>
++#include <linux/delay.h>
++#include <linux/version.h>
++
++#define DP(_f, _args...) printk(KERN_ALERT " " _f "\n" , ## _args )
++
++fastcall void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
++{
++ static unsigned long printed = 0;
++ char info[100];
++ int i;
++
++ /* Ignore statically-linked init. */
++ if (current->tgid == 1)
++ return;
++
++ VOID(HYPERVISOR_vm_assist(VMASST_CMD_disable,
++ VMASST_TYPE_4gb_segments_notify));
++
++ if (test_and_set_bit(0, &printed))
++ return;
++
++ sprintf(info, "%s (pid=%d)", current->comm, current->tgid);
++
++ DP("");
++ DP("***************************************************************");
++ DP("***************************************************************");
++ DP("** WARNING: Currently emulating unsupported memory accesses **");
++ DP("** in /lib/tls glibc libraries. The emulation is **");
++ DP("** slow. To ensure full performance you should **");
++ DP("** install a 'xen-friendly' (nosegneg) version of **");
++ DP("** the library, or disable tls support by executing **");
++ DP("** the following as root: **");
++ DP("** mv /lib/tls /lib/tls.disabled **");
++ DP("** Offending process: %-38.38s **", info);
++ DP("***************************************************************");
++ DP("***************************************************************");
++ DP("");
++
++ for (i = 5; i > 0; i--) {
++ touch_softlockup_watchdog();
++ printk("Pausing... %d", i);
++ mdelay(1000);
++ printk("\b\b\b\b\b\b\b\b\b\b\b\b");
++ }
++
++ printk("Continuing...\n\n");
++}
++
++static int __init fixup_init(void)
++{
++ WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
++ VMASST_TYPE_4gb_segments_notify));
++ return 0;
++}
++__initcall(fixup_init);
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/head-xen.S linux-2.6.18-xen-3.2.0/arch/i386/kernel/head-xen.S
+--- linux-2.6.18.8/arch/i386/kernel/head-xen.S 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/head-xen.S 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,207 @@
++
++
++.text
++#include <linux/elfnote.h>
++#include <linux/threads.h>
++#include <linux/linkage.h>
++#include <asm/segment.h>
++#include <asm/page.h>
++#include <asm/cache.h>
++#include <asm/thread_info.h>
++#include <asm/asm-offsets.h>
++#include <asm/dwarf2.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/elfnote.h>
++
++/*
++ * References to members of the new_cpu_data structure.
++ */
++
++#define X86 new_cpu_data+CPUINFO_x86
++#define X86_VENDOR new_cpu_data+CPUINFO_x86_vendor
++#define X86_MODEL new_cpu_data+CPUINFO_x86_model
++#define X86_MASK new_cpu_data+CPUINFO_x86_mask
++#define X86_HARD_MATH new_cpu_data+CPUINFO_hard_math
++#define X86_CPUID new_cpu_data+CPUINFO_cpuid_level
++#define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability
++#define X86_VENDOR_ID new_cpu_data+CPUINFO_x86_vendor_id
++
++#define VIRT_ENTRY_OFFSET 0x0
++.org VIRT_ENTRY_OFFSET
++ENTRY(startup_32)
++ movl %esi,xen_start_info
++ cld
++
++ /* Set up the stack pointer */
++ movl $(init_thread_union+THREAD_SIZE),%esp
++
++ /* get vendor info */
++ xorl %eax,%eax # call CPUID with 0 -> return vendor ID
++ XEN_CPUID
++ movl %eax,X86_CPUID # save CPUID level
++ movl %ebx,X86_VENDOR_ID # lo 4 chars
++ movl %edx,X86_VENDOR_ID+4 # next 4 chars
++ movl %ecx,X86_VENDOR_ID+8 # last 4 chars
++
++ movl $1,%eax # Use the CPUID instruction to get CPU type
++ XEN_CPUID
++ movb %al,%cl # save reg for future use
++ andb $0x0f,%ah # mask processor family
++ movb %ah,X86
++ andb $0xf0,%al # mask model
++ shrb $4,%al
++ movb %al,X86_MODEL
++ andb $0x0f,%cl # mask mask revision
++ movb %cl,X86_MASK
++ movl %edx,X86_CAPABILITY
++
++ movb $1,X86_HARD_MATH
++
++ xorl %eax,%eax # Clear FS/GS and LDT
++ movl %eax,%fs
++ movl %eax,%gs
++ cld # gcc2 wants the direction flag cleared at all times
++
++ pushl %eax # fake return address
++ jmp start_kernel
++
++#define HYPERCALL_PAGE_OFFSET 0x1000
++.org HYPERCALL_PAGE_OFFSET
++ENTRY(hypercall_page)
++ CFI_STARTPROC
++.skip 0x1000
++ CFI_ENDPROC
++
++/*
++ * Real beginning of normal "text" segment
++ */
++ENTRY(stext)
++ENTRY(_stext)
++
++/*
++ * BSS section
++ */
++.section ".bss.page_aligned","w"
++ENTRY(empty_zero_page)
++ .fill 4096,1,0
++
++/*
++ * This starts the data section.
++ */
++.data
++
++/*
++ * The Global Descriptor Table contains 28 quadwords, per-CPU.
++ */
++ .align L1_CACHE_BYTES
++ENTRY(cpu_gdt_table)
++ .quad 0x0000000000000000 /* NULL descriptor */
++ .quad 0x0000000000000000 /* 0x0b reserved */
++ .quad 0x0000000000000000 /* 0x13 reserved */
++ .quad 0x0000000000000000 /* 0x1b reserved */
++ .quad 0x0000000000000000 /* 0x20 unused */
++ .quad 0x0000000000000000 /* 0x28 unused */
++ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
++ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
++ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
++ .quad 0x0000000000000000 /* 0x4b reserved */
++ .quad 0x0000000000000000 /* 0x53 reserved */
++ .quad 0x0000000000000000 /* 0x5b reserved */
++
++ .quad 0x00cf9a000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
++ .quad 0x00cf92000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
++ .quad 0x00cffa000000ffff /* 0x73 user 4GB code at 0x00000000 */
++ .quad 0x00cff2000000ffff /* 0x7b user 4GB data at 0x00000000 */
++
++ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
++ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
++
++ /*
++ * Segments used for calling PnP BIOS have byte granularity.
++	 * The code segments and data segments have fixed 64k limits,
++ * the transfer segment sizes are set at run time.
++ */
++ .quad 0x0000000000000000 /* 0x90 32-bit code */
++ .quad 0x0000000000000000 /* 0x98 16-bit code */
++ .quad 0x0000000000000000 /* 0xa0 16-bit data */
++ .quad 0x0000000000000000 /* 0xa8 16-bit data */
++ .quad 0x0000000000000000 /* 0xb0 16-bit data */
++
++ /*
++ * The APM segments have byte granularity and their bases
++ * are set at run time. All have 64k limits.
++ */
++ .quad 0x0000000000000000 /* 0xb8 APM CS code */
++ .quad 0x0000000000000000 /* 0xc0 APM CS 16 code (16 bit) */
++ .quad 0x0000000000000000 /* 0xc8 APM DS data */
++
++ .quad 0x0000000000000000 /* 0xd0 - ESPFIX 16-bit SS */
++ .quad 0x0000000000000000 /* 0xd8 - unused */
++ .quad 0x0000000000000000 /* 0xe0 - unused */
++ .quad 0x0000000000000000 /* 0xe8 - unused */
++ .quad 0x0000000000000000 /* 0xf0 - unused */
++ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++/*
++ * __xen_guest information
++ */
++.macro utoa value
++ .if (\value) < 0 || (\value) >= 0x10
++ utoa (((\value)>>4)&0x0fffffff)
++ .endif
++ .if ((\value) & 0xf) < 10
++ .byte '0' + ((\value) & 0xf)
++ .else
++ .byte 'A' + ((\value) & 0xf) - 10
++ .endif
++.endm
++
++.section __xen_guest
++ .ascii "GUEST_OS=linux,GUEST_VER=2.6"
++ .ascii ",XEN_VER=xen-3.0"
++ .ascii ",VIRT_BASE=0x"
++ utoa __PAGE_OFFSET
++ .ascii ",ELF_PADDR_OFFSET=0x"
++ utoa __PAGE_OFFSET
++ .ascii ",VIRT_ENTRY=0x"
++ utoa (__PAGE_OFFSET + __PHYSICAL_START + VIRT_ENTRY_OFFSET)
++ .ascii ",HYPERCALL_PAGE=0x"
++ utoa ((__PHYSICAL_START+HYPERCALL_PAGE_OFFSET)>>PAGE_SHIFT)
++ .ascii ",FEATURES=writable_page_tables"
++ .ascii "|writable_descriptor_tables"
++ .ascii "|auto_translated_physmap"
++ .ascii "|pae_pgdir_above_4gb"
++ .ascii "|supervisor_mode_kernel"
++#ifdef CONFIG_X86_PAE
++ .ascii ",PAE=yes[extended-cr3]"
++#else
++ .ascii ",PAE=no"
++#endif
++ .ascii ",LOADER=generic"
++ .byte 0
++#endif /* CONFIG_XEN_COMPAT <= 0x030002 */
++
++
++ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz, "linux")
++ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz, "2.6")
++ ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz, "xen-3.0")
++ ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE, .long, __PAGE_OFFSET)
++#if CONFIG_XEN_COMPAT <= 0x030002
++ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .long, __PAGE_OFFSET)
++#else
++ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .long, 0)
++#endif
++ ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .long, startup_32)
++ ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .long, hypercall_page)
++ ELFNOTE(Xen, XEN_ELFNOTE_HV_START_LOW, .long, HYPERVISOR_VIRT_START)
++ ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz, "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|pae_pgdir_above_4gb|supervisor_mode_kernel")
++#ifdef CONFIG_X86_PAE
++ ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz, "yes")
++ ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .quad, _PAGE_PRESENT,_PAGE_PRESENT)
++#else
++ ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz, "no")
++ ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .long, _PAGE_PRESENT,_PAGE_PRESENT)
++#endif
++ ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz, "generic")
++ ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long, 1)
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/init_task-xen.c linux-2.6.18-xen-3.2.0/arch/i386/kernel/init_task-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/init_task-xen.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/init_task-xen.c 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,51 @@
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/init.h>
++#include <linux/init_task.h>
++#include <linux/fs.h>
++#include <linux/mqueue.h>
++
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/desc.h>
++
++static struct fs_struct init_fs = INIT_FS;
++static struct files_struct init_files = INIT_FILES;
++static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
++static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
++
++#define swapper_pg_dir ((pgd_t *)NULL)
++struct mm_struct init_mm = INIT_MM(init_mm);
++#undef swapper_pg_dir
++
++EXPORT_SYMBOL(init_mm);
++
++/*
++ * Initial thread structure.
++ *
++ * We need to make sure that this is THREAD_SIZE aligned due to the
++ * way process stacks are handled. This is done by having a special
++ * "init_task" linker map entry..
++ */
++union thread_union init_thread_union
++ __attribute__((__section__(".data.init_task"))) =
++ { INIT_THREAD_INFO(init_task) };
++
++/*
++ * Initial task structure.
++ *
++ * All other task structs will be allocated on slabs in fork.c
++ */
++struct task_struct init_task = INIT_TASK(init_task);
++
++EXPORT_SYMBOL(init_task);
++
++#ifndef CONFIG_X86_NO_TSS
++/*
++ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
++ * no more per-task TSS's.
++ */
++DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
++#endif
++
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/io_apic-xen.c linux-2.6.18-xen-3.2.0/arch/i386/kernel/io_apic-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/io_apic-xen.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/io_apic-xen.c 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,2770 @@
++/*
++ * Intel IO-APIC support for multi-Pentium hosts.
++ *
++ * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
++ *
++ * Many thanks to Stig Venaas for trying out countless experimental
++ * patches and reporting/debugging problems patiently!
++ *
++ * (c) 1999, Multiple IO-APIC support, developed by
++ * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
++ * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
++ * further tested and cleaned up by Zach Brown <zab@redhat.com>
++ * and Ingo Molnar <mingo@redhat.com>
++ *
++ * Fixes
++ * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
++ * thanks to Eric Gilmore
++ * and Rolf G. Tews
++ * for testing these extensively
++ * Paul Diefenbaugh : Added full ACPI support
++ */
++
++#include <linux/mm.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++#include <linux/smp_lock.h>
++#include <linux/mc146818rtc.h>
++#include <linux/compiler.h>
++#include <linux/acpi.h>
++#include <linux/module.h>
++#include <linux/sysdev.h>
++
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/desc.h>
++#include <asm/timer.h>
++#include <asm/i8259.h>
++#include <asm/nmi.h>
++
++#include <mach_apic.h>
++
++#include "io_ports.h"
++
++#ifdef CONFIG_XEN
++
++#include <xen/interface/xen.h>
++#include <xen/interface/physdev.h>
++
++/* Fake i8259 */
++#define make_8259A_irq(_irq) (io_apic_irqs &= ~(1UL<<(_irq)))
++#define disable_8259A_irq(_irq) ((void)0)
++#define i8259A_irq_pending(_irq) (0)
++
++unsigned long io_apic_irqs;
++
++static inline unsigned int xen_io_apic_read(unsigned int apic, unsigned int reg)
++{
++ struct physdev_apic apic_op;
++ int ret;
++
++ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
++ apic_op.reg = reg;
++ ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
++ if (ret)
++ return ret;
++ return apic_op.value;
++}
++
++static inline void xen_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
++{
++ struct physdev_apic apic_op;
++
++ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
++ apic_op.reg = reg;
++ apic_op.value = value;
++ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op));
++}
++
++#define io_apic_read(a,r) xen_io_apic_read(a,r)
++#define io_apic_write(a,r,v) xen_io_apic_write(a,r,v)
++
++#endif /* CONFIG_XEN */
++
++int (*ioapic_renumber_irq)(int ioapic, int irq);
++atomic_t irq_mis_count;
++
++/* Where if anywhere is the i8259 connect in external int mode */
++static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
++
++static DEFINE_SPINLOCK(ioapic_lock);
++static DEFINE_SPINLOCK(vector_lock);
++
++int timer_over_8254 __initdata = 1;
++
++/*
++ * Is the SiS APIC rmw bug present ?
++ * -1 = don't know, 0 = no, 1 = yes
++ */
++int sis_apic_bug = -1;
++
++/*
++ * # of IRQ routing registers
++ */
++int nr_ioapic_registers[MAX_IO_APICS];
++
++int disable_timer_pin_1 __initdata;
++
++/*
++ * Rough estimation of how many shared IRQs there are, can
++ * be changed anytime.
++ */
++#define MAX_PLUS_SHARED_IRQS NR_IRQS
++#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
++
++/*
++ * This is performance-critical, we want to do it O(1)
++ *
++ * the indexing order of this array favors 1:1 mappings
++ * between pins and IRQs.
++ */
++
++static struct irq_pin_list {
++ int apic, pin, next;
++} irq_2_pin[PIN_MAP_SIZE];
++
++int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
++#ifdef CONFIG_PCI_MSI
++#define vector_to_irq(vector) \
++ (platform_legacy_irq(vector) ? vector : vector_irq[vector])
++#else
++#define vector_to_irq(vector) (vector)
++#endif
++
++/*
++ * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
++ * shared ISA-space IRQs, so we have to support them. We are super
++ * fast in the common case, and fast for shared ISA-space IRQs.
++ */
++static void add_pin_to_irq(unsigned int irq, int apic, int pin)
++{
++ static int first_free_entry = NR_IRQS;
++ struct irq_pin_list *entry = irq_2_pin + irq;
++
++ while (entry->next)
++ entry = irq_2_pin + entry->next;
++
++ if (entry->pin != -1) {
++ entry->next = first_free_entry;
++ entry = irq_2_pin + entry->next;
++ if (++first_free_entry >= PIN_MAP_SIZE)
++ panic("io_apic.c: whoops");
++ }
++ entry->apic = apic;
++ entry->pin = pin;
++}
++
++#ifdef CONFIG_XEN
++#define clear_IO_APIC() ((void)0)
++#else
++/*
++ * Reroute an IRQ to a different pin.
++ */
++static void __init replace_pin_at_irq(unsigned int irq,
++ int oldapic, int oldpin,
++ int newapic, int newpin)
++{
++ struct irq_pin_list *entry = irq_2_pin + irq;
++
++ while (1) {
++ if (entry->apic == oldapic && entry->pin == oldpin) {
++ entry->apic = newapic;
++ entry->pin = newpin;
++ }
++ if (!entry->next)
++ break;
++ entry = irq_2_pin + entry->next;
++ }
++}
++
++static void __modify_IO_APIC_irq (unsigned int irq, unsigned long enable, unsigned long disable)
++{
++ struct irq_pin_list *entry = irq_2_pin + irq;
++ unsigned int pin, reg;
++
++ for (;;) {
++ pin = entry->pin;
++ if (pin == -1)
++ break;
++ reg = io_apic_read(entry->apic, 0x10 + pin*2);
++ reg &= ~disable;
++ reg |= enable;
++ io_apic_modify(entry->apic, 0x10 + pin*2, reg);
++ if (!entry->next)
++ break;
++ entry = irq_2_pin + entry->next;
++ }
++}
++
++/* mask = 1 */
++static void __mask_IO_APIC_irq (unsigned int irq)
++{
++ __modify_IO_APIC_irq(irq, 0x00010000, 0);
++}
++
++/* mask = 0 */
++static void __unmask_IO_APIC_irq (unsigned int irq)
++{
++ __modify_IO_APIC_irq(irq, 0, 0x00010000);
++}
++
++/* mask = 1, trigger = 0 */
++static void __mask_and_edge_IO_APIC_irq (unsigned int irq)
++{
++ __modify_IO_APIC_irq(irq, 0x00010000, 0x00008000);
++}
++
++/* mask = 0, trigger = 1 */
++static void __unmask_and_level_IO_APIC_irq (unsigned int irq)
++{
++ __modify_IO_APIC_irq(irq, 0x00008000, 0x00010000);
++}
++
++static void mask_IO_APIC_irq (unsigned int irq)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ __mask_IO_APIC_irq(irq);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void unmask_IO_APIC_irq (unsigned int irq)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ __unmask_IO_APIC_irq(irq);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
++{
++ struct IO_APIC_route_entry entry;
++ unsigned long flags;
++
++ /* Check delivery_mode to be sure we're not clearing an SMI pin */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ *(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
++ *(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ if (entry.delivery_mode == dest_SMI)
++ return;
++
++ /*
++ * Disable it in the IO-APIC irq-routing table:
++ */
++ memset(&entry, 0, sizeof(entry));
++ entry.mask = 1;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
++ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void clear_IO_APIC (void)
++{
++ int apic, pin;
++
++ for (apic = 0; apic < nr_ioapics; apic++)
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
++ clear_IO_APIC_pin(apic, pin);
++}
++
++#ifdef CONFIG_SMP
++static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
++{
++ unsigned long flags;
++ int pin;
++ struct irq_pin_list *entry = irq_2_pin + irq;
++ unsigned int apicid_value;
++ cpumask_t tmp;
++
++ cpus_and(tmp, cpumask, cpu_online_map);
++ if (cpus_empty(tmp))
++ tmp = TARGET_CPUS;
++
++ cpus_and(cpumask, tmp, CPU_MASK_ALL);
++
++ apicid_value = cpu_mask_to_apicid(cpumask);
++ /* Prepare to do the io_apic_write */
++ apicid_value = apicid_value << 24;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ for (;;) {
++ pin = entry->pin;
++ if (pin == -1)
++ break;
++ io_apic_write(entry->apic, 0x10 + 1 + pin*2, apicid_value);
++ if (!entry->next)
++ break;
++ entry = irq_2_pin + entry->next;
++ }
++ set_irq_info(irq, cpumask);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++#if defined(CONFIG_IRQBALANCE)
++# include <asm/processor.h> /* kernel_thread() */
++# include <linux/kernel_stat.h> /* kstat */
++# include <linux/slab.h> /* kmalloc() */
++# include <linux/timer.h> /* time_after() */
++
++#ifdef CONFIG_BALANCED_IRQ_DEBUG
++# define TDprintk(x...) do { printk("<%ld:%s:%d>: ", jiffies, __FILE__, __LINE__); printk(x); } while (0)
++# define Dprintk(x...) do { TDprintk(x); } while (0)
++# else
++# define TDprintk(x...)
++# define Dprintk(x...)
++# endif
++
++#define IRQBALANCE_CHECK_ARCH -999
++#define MAX_BALANCED_IRQ_INTERVAL (5*HZ)
++#define MIN_BALANCED_IRQ_INTERVAL (HZ/2)
++#define BALANCED_IRQ_MORE_DELTA (HZ/10)
++#define BALANCED_IRQ_LESS_DELTA (HZ)
++
++static int irqbalance_disabled __read_mostly = IRQBALANCE_CHECK_ARCH;
++static int physical_balance __read_mostly;
++static long balanced_irq_interval __read_mostly = MAX_BALANCED_IRQ_INTERVAL;
++
++static struct irq_cpu_info {
++ unsigned long * last_irq;
++ unsigned long * irq_delta;
++ unsigned long irq;
++} irq_cpu_data[NR_CPUS];
++
++#define CPU_IRQ(cpu) (irq_cpu_data[cpu].irq)
++#define LAST_CPU_IRQ(cpu,irq) (irq_cpu_data[cpu].last_irq[irq])
++#define IRQ_DELTA(cpu,irq) (irq_cpu_data[cpu].irq_delta[irq])
++
++#define IDLE_ENOUGH(cpu,now) \
++ (idle_cpu(cpu) && ((now) - per_cpu(irq_stat, (cpu)).idle_timestamp > 1))
++
++#define IRQ_ALLOWED(cpu, allowed_mask) cpu_isset(cpu, allowed_mask)
++
++#define CPU_TO_PACKAGEINDEX(i) (first_cpu(cpu_sibling_map[i]))
++
++static cpumask_t balance_irq_affinity[NR_IRQS] = {
++ [0 ... NR_IRQS-1] = CPU_MASK_ALL
++};
++
++void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
++{
++ balance_irq_affinity[irq] = mask;
++}
++
++static unsigned long move(int curr_cpu, cpumask_t allowed_mask,
++ unsigned long now, int direction)
++{
++ int search_idle = 1;
++ int cpu = curr_cpu;
++
++ goto inside;
++
++ do {
++ if (unlikely(cpu == curr_cpu))
++ search_idle = 0;
++inside:
++ if (direction == 1) {
++ cpu++;
++ if (cpu >= NR_CPUS)
++ cpu = 0;
++ } else {
++ cpu--;
++ if (cpu == -1)
++ cpu = NR_CPUS-1;
++ }
++ } while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu,allowed_mask) ||
++ (search_idle && !IDLE_ENOUGH(cpu,now)));
++
++ return cpu;
++}
++
++static inline void balance_irq(int cpu, int irq)
++{
++ unsigned long now = jiffies;
++ cpumask_t allowed_mask;
++ unsigned int new_cpu;
++
++ if (irqbalance_disabled)
++ return;
++
++ cpus_and(allowed_mask, cpu_online_map, balance_irq_affinity[irq]);
++ new_cpu = move(cpu, allowed_mask, now, 1);
++ if (cpu != new_cpu) {
++ set_pending_irq(irq, cpumask_of_cpu(new_cpu));
++ }
++}
++
++static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
++{
++ int i, j;
++ Dprintk("Rotating IRQs among CPUs.\n");
++ for_each_online_cpu(i) {
++ for (j = 0; j < NR_IRQS; j++) {
++ if (!irq_desc[j].action)
++ continue;
++ /* Is it a significant load ? */
++ if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i),j) <
++ useful_load_threshold)
++ continue;
++ balance_irq(i, j);
++ }
++ }
++ balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
++ balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
++ return;
++}
++
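++/*
++ * One pass of the in-kernel IRQ balancer: accumulate per-CPU interrupt
++ * deltas since the last pass, folding sibling load into the lowest
++ * numbered sibling of each package, then find the most and least loaded
++ * packages. If the imbalance is significant, pick the IRQ on the heavy
++ * package whose delta best fits the imbalance and retarget it to the
++ * least loaded sibling of the light package; otherwise back off by
++ * lengthening the balancing interval.
++ */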
++static void do_irq_balance(void)
++{
++ int i, j;
++ unsigned long max_cpu_irq = 0, min_cpu_irq = (~0);
++ unsigned long move_this_load = 0;
++ int max_loaded = 0, min_loaded = 0;
++ int load;
++ unsigned long useful_load_threshold = balanced_irq_interval + 10;
++ int selected_irq;
++ int tmp_loaded, first_attempt = 1;
++ unsigned long tmp_cpu_irq;
++ unsigned long imbalance = 0;
++ cpumask_t allowed_mask, target_cpu_mask, tmp;
++
++ for_each_possible_cpu(i) {
++ int package_index;
++ CPU_IRQ(i) = 0;
++ if (!cpu_online(i))
++ continue;
++ package_index = CPU_TO_PACKAGEINDEX(i);
++ for (j = 0; j < NR_IRQS; j++) {
++ unsigned long value_now, delta;
++ /* Is this an active IRQ? */
++ if (!irq_desc[j].action)
++ continue;
++ if ( package_index == i )
++ IRQ_DELTA(package_index,j) = 0;
++ /* Determine the total count per processor per IRQ */
++ value_now = (unsigned long) kstat_cpu(i).irqs[j];
++
++ /* Determine the activity per processor per IRQ */
++ delta = value_now - LAST_CPU_IRQ(i,j);
++
++ /* Update last_cpu_irq[][] for the next time */
++ LAST_CPU_IRQ(i,j) = value_now;
++
++ /* Ignore IRQs whose rate is less than the clock */
++ if (delta < useful_load_threshold)
++ continue;
++ /* update the load for the processor or package total */
++ IRQ_DELTA(package_index,j) += delta;
++
++ /* Keep track of the higher numbered sibling as well */
++ if (i != package_index)
++ CPU_IRQ(i) += delta;
++ /*
++ * We have sibling A and sibling B in the package
++ *
++ * cpu_irq[A] = load for cpu A + load for cpu B
++ * cpu_irq[B] = load for cpu B
++ */
++ CPU_IRQ(package_index) += delta;
++ }
++ }
++ /* Find the least loaded processor package */
++ for_each_online_cpu(i) {
++ if (i != CPU_TO_PACKAGEINDEX(i))
++ continue;
++ if (min_cpu_irq > CPU_IRQ(i)) {
++ min_cpu_irq = CPU_IRQ(i);
++ min_loaded = i;
++ }
++ }
++ max_cpu_irq = ULONG_MAX;
++
++tryanothercpu:
++ /* Look for heaviest loaded processor.
++ * We may come back to get the next heaviest loaded processor.
++ * Skip processors with trivial loads.
++ */
++ tmp_cpu_irq = 0;
++ tmp_loaded = -1;
++ for_each_online_cpu(i) {
++ if (i != CPU_TO_PACKAGEINDEX(i))
++ continue;
++ if (max_cpu_irq <= CPU_IRQ(i))
++ continue;
++ if (tmp_cpu_irq < CPU_IRQ(i)) {
++ tmp_cpu_irq = CPU_IRQ(i);
++ tmp_loaded = i;
++ }
++ }
++
++ if (tmp_loaded == -1) {
++ /* With only a small number of heavy interrupt sources, some
++ * CPUs get loaded too much. We use Ingo's original
++ * approach to rotate them around.
++ */
++ if (!first_attempt && imbalance >= useful_load_threshold) {
++ rotate_irqs_among_cpus(useful_load_threshold);
++ return;
++ }
++ goto not_worth_the_effort;
++ }
++
++ first_attempt = 0; /* heaviest search */
++ max_cpu_irq = tmp_cpu_irq; /* load */
++ max_loaded = tmp_loaded; /* processor */
++ imbalance = (max_cpu_irq - min_cpu_irq) / 2;
++
++ Dprintk("max_loaded cpu = %d\n", max_loaded);
++ Dprintk("min_loaded cpu = %d\n", min_loaded);
++ Dprintk("max_cpu_irq load = %ld\n", max_cpu_irq);
++ Dprintk("min_cpu_irq load = %ld\n", min_cpu_irq);
++ Dprintk("load imbalance = %lu\n", imbalance);
++
++ /* If the imbalance is less than roughly an eighth of the max
++ * load, we are into diminishing returns - quit.
++ */
++ if (imbalance < (max_cpu_irq >> 3)) {
++ Dprintk("Imbalance too trivial\n");
++ goto not_worth_the_effort;
++ }
++
++tryanotherirq:
++ /* if we select an IRQ to move that can't go where we want, then
++ * see if there is another one to try.
++ */
++ move_this_load = 0;
++ selected_irq = -1;
++ for (j = 0; j < NR_IRQS; j++) {
++ /* Is this an active IRQ? */
++ if (!irq_desc[j].action)
++ continue;
++ if (imbalance <= IRQ_DELTA(max_loaded,j))
++ continue;
++ /* Try to find the IRQ that is closest to the imbalance
++ * without going over.
++ */
++ if (move_this_load < IRQ_DELTA(max_loaded,j)) {
++ move_this_load = IRQ_DELTA(max_loaded,j);
++ selected_irq = j;
++ }
++ }
++ if (selected_irq == -1) {
++ goto tryanothercpu;
++ }
++
++ imbalance = move_this_load;
++
++ /* For the physical_balance case, we accumulated both load
++ * values in one of the siblings' cpu_irq[],
++ * to use the same code for physical and logical processors
++ * as much as possible.
++ *
++ * NOTE: the cpu_irq[] array holds the sum of the load for
++ * sibling A and sibling B in the slot for the lowest numbered
++ * sibling (A), _AND_ the load for sibling B in the slot for
++ * the higher numbered sibling.
++ *
++ * We seek the least loaded sibling by making the comparison
++ * (A+B)/2 vs B
++ */
++ load = CPU_IRQ(min_loaded) >> 1;
++ for_each_cpu_mask(j, cpu_sibling_map[min_loaded]) {
++ if (load > CPU_IRQ(j)) {
++ /* This won't change cpu_sibling_map[min_loaded] */
++ load = CPU_IRQ(j);
++ min_loaded = j;
++ }
++ }
++
++ cpus_and(allowed_mask,
++ cpu_online_map,
++ balance_irq_affinity[selected_irq]);
++ target_cpu_mask = cpumask_of_cpu(min_loaded);
++ cpus_and(tmp, target_cpu_mask, allowed_mask);
++
++ if (!cpus_empty(tmp)) {
++
++ Dprintk("irq = %d moved to cpu = %d\n",
++ selected_irq, min_loaded);
++ /* mark for change destination */
++ set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded));
++
++ /* Since we made a change, come back sooner to
++ * check for more variation.
++ */
++ balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
++ balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
++ return;
++ }
++ goto tryanotherirq;
++
++not_worth_the_effort:
++ /*
++ * if we did not find an IRQ to move, then adjust the time interval
++ * upward
++ */
++ balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL,
++ balanced_irq_interval + BALANCED_IRQ_MORE_DELTA);
++ Dprintk("IRQ worth rotating not found\n");
++ return;
++}
++
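++/*
++ * Main loop of the "kirqd" kernel thread: start by pointing every IRQ at
++ * CPU 0, then wake up roughly every balanced_irq_interval jiffies and run
++ * do_irq_balance().
++ */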
++static int balanced_irq(void *unused)
++{
++ int i;
++ unsigned long prev_balance_time = jiffies;
++ long time_remaining = balanced_irq_interval;
++
++ daemonize("kirqd");
++
++ /* push everything to CPU 0 to give us a starting point. */
++ for (i = 0 ; i < NR_IRQS ; i++) {
++ irq_desc[i].pending_mask = cpumask_of_cpu(0);
++ set_pending_irq(i, cpumask_of_cpu(0));
++ }
++
++ for ( ; ; ) {
++ time_remaining = schedule_timeout_interruptible(time_remaining);
++ try_to_freeze();
++ if (time_after(jiffies,
++ prev_balance_time+balanced_irq_interval)) {
++ preempt_disable();
++ do_irq_balance();
++ prev_balance_time = jiffies;
++ time_remaining = balanced_irq_interval;
++ preempt_enable();
++ }
++ }
++ return 0;
++}
++
++static int __init balanced_irq_init(void)
++{
++ int i;
++ struct cpuinfo_x86 *c;
++ cpumask_t tmp;
++
++ cpus_shift_right(tmp, cpu_online_map, 2);
++ c = &boot_cpu_data;
++ /* When not overridden on the command line, ask the subarchitecture. */
++ if (irqbalance_disabled == IRQBALANCE_CHECK_ARCH)
++ irqbalance_disabled = NO_BALANCE_IRQ;
++ if (irqbalance_disabled)
++ return 0;
++
++ /* disable irqbalance completely if there is only one processor online */
++ if (num_online_cpus() < 2) {
++ irqbalance_disabled = 1;
++ return 0;
++ }
++ /*
++ * Enable physical balance only if more than 1 physical processor
++ * is present
++ */
++ if (smp_num_siblings > 1 && !cpus_empty(tmp))
++ physical_balance = 1;
++
++ for_each_online_cpu(i) {
++ irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
++ irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
++ if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) {
++ printk(KERN_ERR "balanced_irq_init: out of memory");
++ goto failed;
++ }
++ memset(irq_cpu_data[i].irq_delta,0,sizeof(unsigned long) * NR_IRQS);
++ memset(irq_cpu_data[i].last_irq,0,sizeof(unsigned long) * NR_IRQS);
++ }
++
++ printk(KERN_INFO "Starting balanced_irq\n");
++ if (kernel_thread(balanced_irq, NULL, CLONE_KERNEL) >= 0)
++ return 0;
++ else
++ printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
++failed:
++ for_each_possible_cpu(i) {
++ kfree(irq_cpu_data[i].irq_delta);
++ irq_cpu_data[i].irq_delta = NULL;
++ kfree(irq_cpu_data[i].last_irq);
++ irq_cpu_data[i].last_irq = NULL;
++ }
++ return 0;
++}
++
++int __init irqbalance_disable(char *str)
++{
++ irqbalance_disabled = 1;
++ return 1;
++}
++
++__setup("noirqbalance", irqbalance_disable);
++
++late_initcall(balanced_irq_init);
++#endif /* CONFIG_IRQBALANCE */
++#endif /* CONFIG_SMP */
++#endif
++
++#ifndef CONFIG_SMP
++void fastcall send_IPI_self(int vector)
++{
++#ifndef CONFIG_XEN
++ unsigned int cfg;
++
++ /*
++ * Wait for idle.
++ */
++ apic_wait_icr_idle();
++ cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
++ /*
++ * Send the IPI. The write to APIC_ICR fires this off.
++ */
++ apic_write_around(APIC_ICR, cfg);
++#endif
++}
++#endif /* !CONFIG_SMP */
++
++
++/*
++ * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
++ * specific CPU-side IRQs.
++ */
++
++#define MAX_PIRQS 8
++static int pirq_entries [MAX_PIRQS];
++static int pirqs_enabled;
++int skip_ioapic_setup;
++
++static int __init ioapic_setup(char *str)
++{
++ skip_ioapic_setup = 1;
++ return 1;
++}
++
++__setup("noapic", ioapic_setup);
++
++static int __init ioapic_pirq_setup(char *str)
++{
++ int i, max;
++ int ints[MAX_PIRQS+1];
++
++ get_options(str, ARRAY_SIZE(ints), ints);
++
++ for (i = 0; i < MAX_PIRQS; i++)
++ pirq_entries[i] = -1;
++
++ pirqs_enabled = 1;
++ apic_printk(APIC_VERBOSE, KERN_INFO
++ "PIRQ redirection, working around broken MP-BIOS.\n");
++ max = MAX_PIRQS;
++ if (ints[0] < MAX_PIRQS)
++ max = ints[0];
++
++ for (i = 0; i < max; i++) {
++ apic_printk(APIC_VERBOSE, KERN_DEBUG
++ "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
++ /*
++ * PIRQs are mapped upside down, usually.
++ */
++ pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
++ }
++ return 1;
++}
++
++__setup("pirq=", ioapic_pirq_setup);
++
++/*
++ * Find the IRQ entry number of a certain pin.
++ */
++static int find_irq_entry(int apic, int pin, int type)
++{
++ int i;
++
++ for (i = 0; i < mp_irq_entries; i++)
++ if (mp_irqs[i].mpc_irqtype == type &&
++ (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
++ mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
++ mp_irqs[i].mpc_dstirq == pin)
++ return i;
++
++ return -1;
++}
++
++/*
++ * Find the pin to which IRQ[irq] (ISA) is connected
++ */
++static int __init find_isa_irq_pin(int irq, int type)
++{
++ int i;
++
++ for (i = 0; i < mp_irq_entries; i++) {
++ int lbus = mp_irqs[i].mpc_srcbus;
++
++ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_NEC98
++ ) &&
++ (mp_irqs[i].mpc_irqtype == type) &&
++ (mp_irqs[i].mpc_srcbusirq == irq))
++
++ return mp_irqs[i].mpc_dstirq;
++ }
++ return -1;
++}
++
++static int __init find_isa_irq_apic(int irq, int type)
++{
++ int i;
++
++ for (i = 0; i < mp_irq_entries; i++) {
++ int lbus = mp_irqs[i].mpc_srcbus;
++
++ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_NEC98
++ ) &&
++ (mp_irqs[i].mpc_irqtype == type) &&
++ (mp_irqs[i].mpc_srcbusirq == irq))
++ break;
++ }
++ if (i < mp_irq_entries) {
++ int apic;
++ for(apic = 0; apic < nr_ioapics; apic++) {
++ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
++ return apic;
++ }
++ }
++
++ return -1;
++}
++
++/*
++ * Find a specific PCI IRQ entry.
++ * Not an __init, possibly needed by modules
++ */
++static int pin_2_irq(int idx, int apic, int pin);
++
++int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
++{
++ int apic, i, best_guess = -1;
++
++ apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, "
++ "slot:%d, pin:%d.\n", bus, slot, pin);
++ if (mp_bus_id_to_pci_bus[bus] == -1) {
++ printk(KERN_WARNING "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
++ return -1;
++ }
++ for (i = 0; i < mp_irq_entries; i++) {
++ int lbus = mp_irqs[i].mpc_srcbus;
++
++ for (apic = 0; apic < nr_ioapics; apic++)
++ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
++ mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
++ break;
++
++ if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
++ !mp_irqs[i].mpc_irqtype &&
++ (bus == lbus) &&
++ (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
++ int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
++
++ if (!(apic || IO_APIC_IRQ(irq)))
++ continue;
++
++ if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
++ return irq;
++ /*
++ * Use the first all-but-pin matching entry as a
++ * best-guess fuzzy result for broken mptables.
++ */
++ if (best_guess < 0)
++ best_guess = irq;
++ }
++ }
++ return best_guess;
++}
++EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
++
++/*
++ * This function is currently only a helper for the i386 SMP boot process, where
++ * we need to reprogram the ioredtbls to cater for the CPUs which have come online,
++ * so the mask in all cases should simply be TARGET_CPUS.
++ */
++#ifdef CONFIG_SMP
++#ifndef CONFIG_XEN
++void __init setup_ioapic_dest(void)
++{
++ int pin, ioapic, irq, irq_entry;
++
++ if (skip_ioapic_setup == 1)
++ return;
++
++ for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
++ for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
++ irq_entry = find_irq_entry(ioapic, pin, mp_INT);
++ if (irq_entry == -1)
++ continue;
++ irq = pin_2_irq(irq_entry, ioapic, pin);
++ set_ioapic_affinity_irq(irq, TARGET_CPUS);
++ }
++
++ }
++}
++#endif /* !CONFIG_XEN */
++#endif
++
++/*
++ * EISA Edge/Level control register, ELCR
++ */
++static int EISA_ELCR(unsigned int irq)
++{
++ if (irq < 16) {
++ unsigned int port = 0x4d0 + (irq >> 3);
++ return (inb(port) >> (irq & 7)) & 1;
++ }
++ apic_printk(APIC_VERBOSE, KERN_INFO
++ "Broken MPtable reports ISA irq %d\n", irq);
++ return 0;
++}
++
++/* EISA interrupts are always polarity zero and can be edge or level
++ * trigger depending on the ELCR value. If an interrupt is listed as
++ * EISA conforming in the MP table, that means its trigger type must
++ * be read in from the ELCR */
++
++#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
++#define default_EISA_polarity(idx) (0)
++
++/* ISA interrupts are always polarity zero edge triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_ISA_trigger(idx) (0)
++#define default_ISA_polarity(idx) (0)
++
++/* PCI interrupts are always polarity one level triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_PCI_trigger(idx) (1)
++#define default_PCI_polarity(idx) (1)
++
++/* MCA interrupts are always polarity zero level triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_MCA_trigger(idx) (1)
++#define default_MCA_polarity(idx) (0)
++
++/* NEC98 interrupts are always polarity zero edge triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_NEC98_trigger(idx) (0)
++#define default_NEC98_polarity(idx) (0)
++
++static int __init MPBIOS_polarity(int idx)
++{
++ int bus = mp_irqs[idx].mpc_srcbus;
++ int polarity;
++
++ /*
++ * Determine IRQ line polarity (high active or low active):
++ */
++ switch (mp_irqs[idx].mpc_irqflag & 3)
++ {
++ case 0: /* conforms, ie. bus-type dependent polarity */
++ {
++ switch (mp_bus_id_to_type[bus])
++ {
++ case MP_BUS_ISA: /* ISA pin */
++ {
++ polarity = default_ISA_polarity(idx);
++ break;
++ }
++ case MP_BUS_EISA: /* EISA pin */
++ {
++ polarity = default_EISA_polarity(idx);
++ break;
++ }
++ case MP_BUS_PCI: /* PCI pin */
++ {
++ polarity = default_PCI_polarity(idx);
++ break;
++ }
++ case MP_BUS_MCA: /* MCA pin */
++ {
++ polarity = default_MCA_polarity(idx);
++ break;
++ }
++ case MP_BUS_NEC98: /* NEC 98 pin */
++ {
++ polarity = default_NEC98_polarity(idx);
++ break;
++ }
++ default:
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ polarity = 1;
++ break;
++ }
++ }
++ break;
++ }
++ case 1: /* high active */
++ {
++ polarity = 0;
++ break;
++ }
++ case 2: /* reserved */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ polarity = 1;
++ break;
++ }
++ case 3: /* low active */
++ {
++ polarity = 1;
++ break;
++ }
++ default: /* invalid */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ polarity = 1;
++ break;
++ }
++ }
++ return polarity;
++}
++
++static int MPBIOS_trigger(int idx)
++{
++ int bus = mp_irqs[idx].mpc_srcbus;
++ int trigger;
++
++ /*
++ * Determine IRQ trigger mode (edge or level sensitive):
++ */
++ switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
++ {
++ case 0: /* conforms, ie. bus-type dependent */
++ {
++ switch (mp_bus_id_to_type[bus])
++ {
++ case MP_BUS_ISA: /* ISA pin */
++ {
++ trigger = default_ISA_trigger(idx);
++ break;
++ }
++ case MP_BUS_EISA: /* EISA pin */
++ {
++ trigger = default_EISA_trigger(idx);
++ break;
++ }
++ case MP_BUS_PCI: /* PCI pin */
++ {
++ trigger = default_PCI_trigger(idx);
++ break;
++ }
++ case MP_BUS_MCA: /* MCA pin */
++ {
++ trigger = default_MCA_trigger(idx);
++ break;
++ }
++ case MP_BUS_NEC98: /* NEC 98 pin */
++ {
++ trigger = default_NEC98_trigger(idx);
++ break;
++ }
++ default:
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ trigger = 1;
++ break;
++ }
++ }
++ break;
++ }
++ case 1: /* edge */
++ {
++ trigger = 0;
++ break;
++ }
++ case 2: /* reserved */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ trigger = 1;
++ break;
++ }
++ case 3: /* level */
++ {
++ trigger = 1;
++ break;
++ }
++ default: /* invalid */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ trigger = 0;
++ break;
++ }
++ }
++ return trigger;
++}
++
++static inline int irq_polarity(int idx)
++{
++ return MPBIOS_polarity(idx);
++}
++
++static inline int irq_trigger(int idx)
++{
++ return MPBIOS_trigger(idx);
++}
++
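++/*
++ * Map an MP-table interrupt entry to a Linux IRQ number. ISA-like buses
++ * use the source bus IRQ directly; PCI pins are numbered sequentially
++ * across the IO-APICs (with an optional renumber hook for ES7000).
++ * Pins 16-23 may additionally be redirected via the "pirq=" command line
++ * option.
++ */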
++static int pin_2_irq(int idx, int apic, int pin)
++{
++ int irq, i;
++ int bus = mp_irqs[idx].mpc_srcbus;
++
++ /*
++ * Debugging check, we are in big trouble if this message pops up!
++ */
++ if (mp_irqs[idx].mpc_dstirq != pin)
++ printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
++
++ switch (mp_bus_id_to_type[bus])
++ {
++ case MP_BUS_ISA: /* ISA pin */
++ case MP_BUS_EISA:
++ case MP_BUS_MCA:
++ case MP_BUS_NEC98:
++ {
++ irq = mp_irqs[idx].mpc_srcbusirq;
++ break;
++ }
++ case MP_BUS_PCI: /* PCI pin */
++ {
++ /*
++ * PCI IRQs are mapped in order
++ */
++ i = irq = 0;
++ while (i < apic)
++ irq += nr_ioapic_registers[i++];
++ irq += pin;
++
++ /*
++ * For MPS mode, so far only needed by ES7000 platform
++ */
++ if (ioapic_renumber_irq)
++ irq = ioapic_renumber_irq(apic, irq);
++
++ break;
++ }
++ default:
++ {
++ printk(KERN_ERR "unknown bus type %d.\n",bus);
++ irq = 0;
++ break;
++ }
++ }
++
++ /*
++ * PCI IRQ command line redirection. Yes, limits are hardcoded.
++ */
++ if ((pin >= 16) && (pin <= 23)) {
++ if (pirq_entries[pin-16] != -1) {
++ if (!pirq_entries[pin-16]) {
++ apic_printk(APIC_VERBOSE, KERN_DEBUG
++ "disabling PIRQ%d\n", pin-16);
++ } else {
++ irq = pirq_entries[pin-16];
++ apic_printk(APIC_VERBOSE, KERN_DEBUG
++ "using PIRQ%d -> IRQ %d\n",
++ pin-16, irq);
++ }
++ }
++ }
++ return irq;
++}
++
++static inline int IO_APIC_irq_trigger(int irq)
++{
++ int apic, idx, pin;
++
++ for (apic = 0; apic < nr_ioapics; apic++) {
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++ idx = find_irq_entry(apic,pin,mp_INT);
++ if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
++ return irq_trigger(idx);
++ }
++ }
++ /*
++ * nonexistent IRQs are edge default
++ */
++ return 0;
++}
++
++/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
++u8 irq_vector[NR_IRQ_VECTORS] __read_mostly; /* = { FIRST_DEVICE_VECTOR , 0 }; */
++
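++/*
++ * Under Xen the vector is allocated by the hypervisor through the
++ * PHYSDEVOP_alloc_irq_vector physdev op rather than chosen locally; the
++ * result is cached in IO_APIC_VECTOR(irq) so later calls for the same
++ * IRQ return the same vector.
++ */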
++int assign_irq_vector(int irq)
++{
++ unsigned long flags;
++ int vector;
++ struct physdev_irq irq_op;
++
++ BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
++
++ spin_lock_irqsave(&vector_lock, flags);
++
++ if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
++ spin_unlock_irqrestore(&vector_lock, flags);
++ return IO_APIC_VECTOR(irq);
++ }
++
++ irq_op.irq = irq;
++ if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
++ spin_unlock_irqrestore(&vector_lock, flags);
++ return -ENOSPC;
++ }
++
++ vector = irq_op.vector;
++ vector_irq[vector] = irq;
++ if (irq != AUTO_ASSIGN)
++ IO_APIC_VECTOR(irq) = vector;
++
++ spin_unlock_irqrestore(&vector_lock, flags);
++
++ return vector;
++}
++
++#ifndef CONFIG_XEN
++static struct hw_interrupt_type ioapic_level_type;
++static struct hw_interrupt_type ioapic_edge_type;
++
++#define IOAPIC_AUTO -1
++#define IOAPIC_EDGE 0
++#define IOAPIC_LEVEL 1
++
++static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
++{
++ unsigned idx;
++
++ idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
++
++ if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
++ trigger == IOAPIC_LEVEL)
++ irq_desc[idx].chip = &ioapic_level_type;
++ else
++ irq_desc[idx].chip = &ioapic_edge_type;
++ set_intr_gate(vector, interrupt[idx]);
++}
++#else
++#define ioapic_register_intr(_irq,_vector,_trigger) ((void)0)
++#endif
++
++static void __init setup_IO_APIC_irqs(void)
++{
++ struct IO_APIC_route_entry entry;
++ int apic, pin, idx, irq, first_notcon = 1, vector;
++ unsigned long flags;
++
++ apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
++
++ for (apic = 0; apic < nr_ioapics; apic++) {
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++
++ /*
++ * add it to the IO-APIC irq-routing table:
++ */
++ memset(&entry,0,sizeof(entry));
++
++ entry.delivery_mode = INT_DELIVERY_MODE;
++ entry.dest_mode = INT_DEST_MODE;
++ entry.mask = 0; /* enable IRQ */
++ entry.dest.logical.logical_dest =
++ cpu_mask_to_apicid(TARGET_CPUS);
++
++ idx = find_irq_entry(apic,pin,mp_INT);
++ if (idx == -1) {
++ if (first_notcon) {
++ apic_printk(APIC_VERBOSE, KERN_DEBUG
++ " IO-APIC (apicid-pin) %d-%d",
++ mp_ioapics[apic].mpc_apicid,
++ pin);
++ first_notcon = 0;
++ } else
++ apic_printk(APIC_VERBOSE, ", %d-%d",
++ mp_ioapics[apic].mpc_apicid, pin);
++ continue;
++ }
++
++ entry.trigger = irq_trigger(idx);
++ entry.polarity = irq_polarity(idx);
++
++ if (irq_trigger(idx)) {
++ entry.trigger = 1;
++ entry.mask = 1;
++ }
++
++ irq = pin_2_irq(idx, apic, pin);
++ /*
++ * skip adding the timer int on secondary nodes, which causes
++ * a small but painful rift in the time-space continuum
++ */
++ if (multi_timer_check(apic, irq))
++ continue;
++ else
++ add_pin_to_irq(irq, apic, pin);
++
++ if (/*!apic &&*/ !IO_APIC_IRQ(irq))
++ continue;
++
++ if (IO_APIC_IRQ(irq)) {
++ vector = assign_irq_vector(irq);
++ entry.vector = vector;
++ ioapic_register_intr(irq, vector, IOAPIC_AUTO);
++
++ if (!apic && (irq < 16))
++ disable_8259A_irq(irq);
++ }
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
++ io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
++ set_native_irq_info(irq, TARGET_CPUS);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ }
++ }
++
++ if (!first_notcon)
++ apic_printk(APIC_VERBOSE, " not connected.\n");
++}
++
++/*
++ * Set up the 8259A-master output pin:
++ */
++#ifndef CONFIG_XEN
++static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
++{
++ struct IO_APIC_route_entry entry;
++ unsigned long flags;
++
++ memset(&entry,0,sizeof(entry));
++
++ disable_8259A_irq(0);
++
++ /* mask LVT0 */
++ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
++
++ /*
++ * We use logical delivery to get the timer IRQ
++ * to the first CPU.
++ */
++ entry.dest_mode = INT_DEST_MODE;
++ entry.mask = 0; /* unmask IRQ now */
++ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++ entry.delivery_mode = INT_DELIVERY_MODE;
++ entry.polarity = 0;
++ entry.trigger = 0;
++ entry.vector = vector;
++
++ /*
++ * The timer IRQ doesn't have to know that behind the
++ * scenes we have an 8259A-master in AEOI mode ...
++ */
++ irq_desc[0].chip = &ioapic_edge_type;
++
++ /*
++ * Add it to the IO-APIC irq-routing table:
++ */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
++ io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ enable_8259A_irq(0);
++}
++
++static inline void UNEXPECTED_IO_APIC(void)
++{
++}
++
++void __init print_IO_APIC(void)
++{
++ int apic, i;
++ union IO_APIC_reg_00 reg_00;
++ union IO_APIC_reg_01 reg_01;
++ union IO_APIC_reg_02 reg_02;
++ union IO_APIC_reg_03 reg_03;
++ unsigned long flags;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
++ for (i = 0; i < nr_ioapics; i++)
++ printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
++ mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
++
++ /*
++ * We are a bit conservative about what we expect. We have to
++ * know about every hardware change ASAP.
++ */
++ printk(KERN_INFO "testing the IO APIC.......................\n");
++
++ for (apic = 0; apic < nr_ioapics; apic++) {
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(apic, 0);
++ reg_01.raw = io_apic_read(apic, 1);
++ if (reg_01.bits.version >= 0x10)
++ reg_02.raw = io_apic_read(apic, 2);
++ if (reg_01.bits.version >= 0x20)
++ reg_03.raw = io_apic_read(apic, 3);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
++ printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
++ printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
++ printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
++ printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
++ if (reg_00.bits.ID >= get_physical_broadcast())
++ UNEXPECTED_IO_APIC();
++ if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
++ UNEXPECTED_IO_APIC();
++
++ printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw);
++ printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
++ if ( (reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
++ (reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
++ (reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
++ (reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
++ (reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
++ (reg_01.bits.entries != 0x2E) &&
++ (reg_01.bits.entries != 0x3F)
++ )
++ UNEXPECTED_IO_APIC();
++
++ printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
++ printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
++ if ( (reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
++ (reg_01.bits.version != 0x10) && /* oldest IO-APICs */
++ (reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
++ (reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
++ (reg_01.bits.version != 0x20) /* Intel P64H (82806 AA) */
++ )
++ UNEXPECTED_IO_APIC();
++ if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
++ UNEXPECTED_IO_APIC();
++
++ /*
++ * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
++ * but the value of reg_02 is read as the previous read register
++ * value, so ignore it if reg_02 == reg_01.
++ */
++ if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
++ printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
++ printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
++ if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
++ UNEXPECTED_IO_APIC();
++ }
++
++ /*
++ * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
++ * or reg_03, but the value of reg_0[23] is read as the previous read
++ * register value, so ignore it if reg_03 == reg_0[12].
++ */
++ if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
++ reg_03.raw != reg_01.raw) {
++ printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
++ printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
++ if (reg_03.bits.__reserved_1)
++ UNEXPECTED_IO_APIC();
++ }
++
++ printk(KERN_DEBUG ".... IRQ redirection table:\n");
++
++ printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
++ " Stat Dest Deli Vect: \n");
++
++ for (i = 0; i <= reg_01.bits.entries; i++) {
++ struct IO_APIC_route_entry entry;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ *(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
++ *(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ printk(KERN_DEBUG " %02x %03X %02X ",
++ i,
++ entry.dest.logical.logical_dest,
++ entry.dest.physical.physical_dest
++ );
++
++ printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
++ entry.mask,
++ entry.trigger,
++ entry.irr,
++ entry.polarity,
++ entry.delivery_status,
++ entry.dest_mode,
++ entry.delivery_mode,
++ entry.vector
++ );
++ }
++ }
++ if (use_pci_vector())
++ printk(KERN_INFO "Using vector-based indexing\n");
++ printk(KERN_DEBUG "IRQ to pin mappings:\n");
++ for (i = 0; i < NR_IRQS; i++) {
++ struct irq_pin_list *entry = irq_2_pin + i;
++ if (entry->pin < 0)
++ continue;
++ if (use_pci_vector() && !platform_legacy_irq(i))
++ printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
++ else
++ printk(KERN_DEBUG "IRQ%d ", i);
++ for (;;) {
++ printk("-> %d:%d", entry->apic, entry->pin);
++ if (!entry->next)
++ break;
++ entry = irq_2_pin + entry->next;
++ }
++ printk("\n");
++ }
++
++ printk(KERN_INFO ".................................... done.\n");
++
++ return;
++}
++
++static void print_APIC_bitfield (int base)
++{
++ unsigned int v;
++ int i, j;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
++ for (i = 0; i < 8; i++) {
++ v = apic_read(base + i*0x10);
++ for (j = 0; j < 32; j++) {
++ if (v & (1<<j))
++ printk("1");
++ else
++ printk("0");
++ }
++ printk("\n");
++ }
++}
++
++void /*__init*/ print_local_APIC(void * dummy)
++{
++ unsigned int v, ver, maxlvt;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
++ smp_processor_id(), hard_smp_processor_id());
++ v = apic_read(APIC_ID);
++ printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(v));
++ v = apic_read(APIC_LVR);
++ printk(KERN_INFO "... APIC VERSION: %08x\n", v);
++ ver = GET_APIC_VERSION(v);
++ maxlvt = get_maxlvt();
++
++ v = apic_read(APIC_TASKPRI);
++ printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
++
++ if (APIC_INTEGRATED(ver)) { /* !82489DX */
++ v = apic_read(APIC_ARBPRI);
++ printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
++ v & APIC_ARBPRI_MASK);
++ v = apic_read(APIC_PROCPRI);
++ printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
++ }
++
++ v = apic_read(APIC_EOI);
++ printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
++ v = apic_read(APIC_RRR);
++ printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
++ v = apic_read(APIC_LDR);
++ printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
++ v = apic_read(APIC_DFR);
++ printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
++ v = apic_read(APIC_SPIV);
++ printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
++
++ printk(KERN_DEBUG "... APIC ISR field:\n");
++ print_APIC_bitfield(APIC_ISR);
++ printk(KERN_DEBUG "... APIC TMR field:\n");
++ print_APIC_bitfield(APIC_TMR);
++ printk(KERN_DEBUG "... APIC IRR field:\n");
++ print_APIC_bitfield(APIC_IRR);
++
++ if (APIC_INTEGRATED(ver)) { /* !82489DX */
++ if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
++ apic_write(APIC_ESR, 0);
++ v = apic_read(APIC_ESR);
++ printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
++ }
++
++ v = apic_read(APIC_ICR);
++ printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
++ v = apic_read(APIC_ICR2);
++ printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
++
++ v = apic_read(APIC_LVTT);
++ printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
++
++ if (maxlvt > 3) { /* PC is LVT#4. */
++ v = apic_read(APIC_LVTPC);
++ printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
++ }
++ v = apic_read(APIC_LVT0);
++ printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
++ v = apic_read(APIC_LVT1);
++ printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
++
++ if (maxlvt > 2) { /* ERR is LVT#3. */
++ v = apic_read(APIC_LVTERR);
++ printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
++ }
++
++ v = apic_read(APIC_TMICT);
++ printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
++ v = apic_read(APIC_TMCCT);
++ printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
++ v = apic_read(APIC_TDCR);
++ printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
++ printk("\n");
++}
++
++void print_all_local_APICs (void)
++{
++ on_each_cpu(print_local_APIC, NULL, 1, 1);
++}
++
++void /*__init*/ print_PIC(void)
++{
++ unsigned int v;
++ unsigned long flags;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk(KERN_DEBUG "\nprinting PIC contents\n");
++
++ spin_lock_irqsave(&i8259A_lock, flags);
++
++ v = inb(0xa1) << 8 | inb(0x21);
++ printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
++
++ v = inb(0xa0) << 8 | inb(0x20);
++ printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
++
++ outb(0x0b,0xa0);
++ outb(0x0b,0x20);
++ v = inb(0xa0) << 8 | inb(0x20);
++ outb(0x0a,0xa0);
++ outb(0x0a,0x20);
++
++ spin_unlock_irqrestore(&i8259A_lock, flags);
++
++ printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
++
++ v = inb(0x4d1) << 8 | inb(0x4d0);
++ printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
++}
++#endif /* !CONFIG_XEN */
++
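++/*
++ * Initialize the irq->pin mapping, record how many redirection entries
++ * each IO-APIC has, locate the pin (if any) through which the i8259
++ * ExtINT is delivered -- preferring what the hardware says over the MP
++ * table -- and finally clear every IO-APIC pin.
++ */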
++static void __init enable_IO_APIC(void)
++{
++ union IO_APIC_reg_01 reg_01;
++ int i8259_apic, i8259_pin;
++ int i, apic;
++ unsigned long flags;
++
++ for (i = 0; i < PIN_MAP_SIZE; i++) {
++ irq_2_pin[i].pin = -1;
++ irq_2_pin[i].next = 0;
++ }
++ if (!pirqs_enabled)
++ for (i = 0; i < MAX_PIRQS; i++)
++ pirq_entries[i] = -1;
++
++ /*
++ * The number of IO-APIC IRQ registers (== #pins):
++ */
++ for (apic = 0; apic < nr_ioapics; apic++) {
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_01.raw = io_apic_read(apic, 1);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ nr_ioapic_registers[apic] = reg_01.bits.entries+1;
++ }
++ for(apic = 0; apic < nr_ioapics; apic++) {
++ int pin;
++ /* See if any of the pins is in ExtINT mode */
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++ struct IO_APIC_route_entry entry;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
++ *(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++
++ /* If the interrupt line is enabled and in ExtInt mode
++ * I have found the pin where the i8259 is connected.
++ */
++ if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
++ ioapic_i8259.apic = apic;
++ ioapic_i8259.pin = pin;
++ goto found_i8259;
++ }
++ }
++ }
++ found_i8259:
++ /* Look to see if the MP table has reported the ExtINT */
++ /* If we could not find the appropriate pin by looking at the ioapic,
++ * the i8259 is probably not connected to the ioapic, but give the
++ * mptable a chance anyway.
++ */
++ i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
++ i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
++ /* Trust the MP table if nothing is set up in the hardware */
++ if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
++ printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
++ ioapic_i8259.pin = i8259_pin;
++ ioapic_i8259.apic = i8259_apic;
++ }
++ /* Complain if the MP table and the hardware disagree */
++ if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
++ (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
++ {
++ printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
++ }
++
++ /*
++ * Do not trust the IO-APIC being empty at bootup
++ */
++ clear_IO_APIC();
++}
++
++/*
++ * Not an __init, needed by the reboot code
++ */
++void disable_IO_APIC(void)
++{
++ /*
++ * Clear the IO-APIC before rebooting:
++ */
++ clear_IO_APIC();
++
++#ifndef CONFIG_XEN
++ /*
++ * If the i8259 is routed through an IOAPIC,
++ * put that IOAPIC in virtual wire mode
++ * so legacy interrupts can be delivered.
++ */
++ if (ioapic_i8259.pin != -1) {
++ struct IO_APIC_route_entry entry;
++ unsigned long flags;
++
++ memset(&entry, 0, sizeof(entry));
++ entry.mask = 0; /* Enabled */
++ entry.trigger = 0; /* Edge */
++ entry.irr = 0;
++ entry.polarity = 0; /* High */
++ entry.delivery_status = 0;
++ entry.dest_mode = 0; /* Physical */
++ entry.delivery_mode = dest_ExtINT; /* ExtInt */
++ entry.vector = 0;
++ entry.dest.physical.physical_dest =
++ GET_APIC_ID(apic_read(APIC_ID));
++
++ /*
++ * Add it to the IO-APIC irq-routing table:
++ */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin,
++ *(((int *)&entry)+1));
++ io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin,
++ *(((int *)&entry)+0));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ }
++ disconnect_bsp_APIC(ioapic_i8259.pin != -1);
++#endif
++}
++
++/*
++ * function to set the IO-APIC physical IDs based on the
++ * values stored in the MPC table.
++ *
++ * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
++ */
++
++#if !defined(CONFIG_XEN) && !defined(CONFIG_X86_NUMAQ)
++static void __init setup_ioapic_ids_from_mpc(void)
++{
++ union IO_APIC_reg_00 reg_00;
++ physid_mask_t phys_id_present_map;
++ int apic;
++ int i;
++ unsigned char old_id;
++ unsigned long flags;
++
++ /*
++ * Don't check I/O APIC IDs for xAPIC systems. They have
++ * no meaning without the serial APIC bus.
++ */
++ if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
++ || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
++ return;
++ /*
++ * This is broken; anything with a real cpu count has to
++ * circumvent this idiocy regardless.
++ */
++ phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
++
++ /*
++ * Set the IOAPIC ID to the value stored in the MPC table.
++ */
++ for (apic = 0; apic < nr_ioapics; apic++) {
++
++ /* Read the register 0 value */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(apic, 0);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ old_id = mp_ioapics[apic].mpc_apicid;
++
++ if (mp_ioapics[apic].mpc_apicid >= get_physical_broadcast()) {
++ printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
++ apic, mp_ioapics[apic].mpc_apicid);
++ printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
++ reg_00.bits.ID);
++ mp_ioapics[apic].mpc_apicid = reg_00.bits.ID;
++ }
++
++ /*
++ * Sanity check, is the ID really free? Every APIC in a
++ * system must have a unique ID or we get lots of nice
++ * 'stuck on smp_invalidate_needed IPI wait' messages.
++ */
++ if (check_apicid_used(phys_id_present_map,
++ mp_ioapics[apic].mpc_apicid)) {
++ printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
++ apic, mp_ioapics[apic].mpc_apicid);
++ for (i = 0; i < get_physical_broadcast(); i++)
++ if (!physid_isset(i, phys_id_present_map))
++ break;
++ if (i >= get_physical_broadcast())
++ panic("Max APIC ID exceeded!\n");
++ printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
++ i);
++ physid_set(i, phys_id_present_map);
++ mp_ioapics[apic].mpc_apicid = i;
++ } else {
++ physid_mask_t tmp;
++ tmp = apicid_to_cpu_present(mp_ioapics[apic].mpc_apicid);
++ apic_printk(APIC_VERBOSE, "Setting %d in the "
++ "phys_id_present_map\n",
++ mp_ioapics[apic].mpc_apicid);
++ physids_or(phys_id_present_map, phys_id_present_map, tmp);
++ }
++
++
++ /*
++ * We need to adjust the IRQ routing table
++ * if the ID changed.
++ */
++ if (old_id != mp_ioapics[apic].mpc_apicid)
++ for (i = 0; i < mp_irq_entries; i++)
++ if (mp_irqs[i].mpc_dstapic == old_id)
++ mp_irqs[i].mpc_dstapic
++ = mp_ioapics[apic].mpc_apicid;
++
++ /*
++ * Read the right value from the MPC table and
++ * write it into the ID register.
++ */
++ apic_printk(APIC_VERBOSE, KERN_INFO
++ "...changing IO-APIC physical APIC ID to %d ...",
++ mp_ioapics[apic].mpc_apicid);
++
++ reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0, reg_00.raw);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ /*
++ * Sanity check
++ */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(apic, 0);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
++ printk("could not set ID!\n");
++ else
++ apic_printk(APIC_VERBOSE, " ok.\n");
++ }
++}
++#else
++static void __init setup_ioapic_ids_from_mpc(void) { }
++#endif
++
++#ifndef CONFIG_XEN
++/*
++ * There is a nasty bug in some older SMP boards: their mptable lies
++ * about the timer IRQ. We do the following to work around the situation:
++ *
++ * - timer IRQ defaults to IO-APIC IRQ
++ * - if this function detects that timer IRQs are defunct, then we fall
++ * back to ISA timer IRQs
++ */
++static int __init timer_irq_works(void)
++{
++ unsigned long t1 = jiffies;
++
++ local_irq_enable();
++ /* Let ten ticks pass... */
++ mdelay((10 * 1000) / HZ);
++
++ /*
++ * Expect a few ticks at least, to be sure some possible
++ * glue logic does not lock up after one or two first
++ * ticks in a non-ExtINT mode. Also the local APIC
++ * might have cached one ExtINT interrupt. Finally, at
++ * least one tick may be lost due to delays.
++ */
++ if (jiffies - t1 > 4)
++ return 1;
++
++ return 0;
++}
++
++/*
++ * In the SMP+IOAPIC case it might happen that there are an unspecified
++ * number of pending IRQ events unhandled. These cases are very rare,
++ * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
++ * better to do it this way, as we then do not have to be aware of
++ * 'pending' interrupts in the IRQ path, except at this point.
++ */
++/*
++ * Edge triggered needs to resend any interrupt
++ * that was delayed but this is now handled in the device
++ * independent code.
++ */
++
++/*
++ * Starting up an edge-triggered IO-APIC interrupt is
++ * nasty - we need to make sure that we get the edge.
++ * If it is already asserted for some reason, we need to
++ * return 1 to indicate that it was pending.
++ *
++ * This is not complete - we should be able to fake
++ * an edge even if it isn't on the 8259A...
++ */
++static unsigned int startup_edge_ioapic_irq(unsigned int irq)
++{
++ int was_pending = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ if (irq < 16) {
++ disable_8259A_irq(irq);
++ if (i8259A_irq_pending(irq))
++ was_pending = 1;
++ }
++ __unmask_IO_APIC_irq(irq);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return was_pending;
++}
++
++/*
++ * Once we have recorded IRQ_PENDING already, we can mask the
++ * interrupt for real. This prevents IRQ storms from unhandled
++ * devices.
++ */
++static void ack_edge_ioapic_irq(unsigned int irq)
++{
++ move_irq(irq);
++ if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
++ == (IRQ_PENDING | IRQ_DISABLED))
++ mask_IO_APIC_irq(irq);
++ ack_APIC_irq();
++}
++
++/*
++ * Level triggered interrupts can just be masked,
++ * and shutting down and starting up the interrupt
++ * is the same as enabling and disabling them -- except
++ * that startup needs to return a "was pending" value.
++ *
++ * Level triggered interrupts are special because we
++ * do not touch any IO-APIC register while handling
++ * them. We ack the APIC in the end-IRQ handler, not
++ * in the start-IRQ-handler. Protection against reentrance
++ * from the same interrupt is still provided, both by the
++ * generic IRQ layer and by the fact that an unacked local
++ * APIC does not accept IRQs.
++ */
++static unsigned int startup_level_ioapic_irq (unsigned int irq)
++{
++ unmask_IO_APIC_irq(irq);
++
++ return 0; /* don't check for pending */
++}
++
++static void end_level_ioapic_irq (unsigned int irq)
++{
++ unsigned long v;
++ int i;
++
++ move_irq(irq);
++/*
++ * It appears there is an erratum which affects at least version 0x11
++ * of I/O APIC (that's the 82093AA and cores integrated into various
++ * chipsets). Under certain conditions a level-triggered interrupt is
++ * erroneously delivered as an edge-triggered one but the respective IRR
++ * bit gets set nevertheless. As a result the I/O unit expects an EOI
++ * message but it will never arrive and further interrupts are blocked
++ * from the source. The exact reason is so far unknown, but the
++ * phenomenon was observed when two consecutive interrupt requests
++ * from a given source get delivered to the same CPU and the source is
++ * temporarily disabled in between.
++ *
++ * A workaround is to simulate an EOI message manually. We achieve it
++ * by setting the trigger mode to edge and then to level when the edge
++ * trigger mode gets detected in the TMR of a local APIC for a
++ * level-triggered interrupt. We mask the source for the time of the
++ * operation to prevent an edge-triggered interrupt escaping meanwhile.
++ * The idea is from Manfred Spraul. --macro
++ */
++ i = IO_APIC_VECTOR(irq);
++
++ v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
++
++ ack_APIC_irq();
++
++ if (!(v & (1 << (i & 0x1f)))) {
++ atomic_inc(&irq_mis_count);
++ spin_lock(&ioapic_lock);
++ __mask_and_edge_IO_APIC_irq(irq);
++ __unmask_and_level_IO_APIC_irq(irq);
++ spin_unlock(&ioapic_lock);
++ }
++}
++
++#ifdef CONFIG_PCI_MSI
++static unsigned int startup_edge_ioapic_vector(unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ return startup_edge_ioapic_irq(irq);
++}
++
++static void ack_edge_ioapic_vector(unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ move_native_irq(vector);
++ ack_edge_ioapic_irq(irq);
++}
++
++static unsigned int startup_level_ioapic_vector (unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ return startup_level_ioapic_irq (irq);
++}
++
++static void end_level_ioapic_vector (unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ move_native_irq(vector);
++ end_level_ioapic_irq(irq);
++}
++
++static void mask_IO_APIC_vector (unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ mask_IO_APIC_irq(irq);
++}
++
++static void unmask_IO_APIC_vector (unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ unmask_IO_APIC_irq(irq);
++}
++
++#ifdef CONFIG_SMP
++static void set_ioapic_affinity_vector (unsigned int vector,
++ cpumask_t cpu_mask)
++{
++ int irq = vector_to_irq(vector);
++
++ set_native_irq_info(vector, cpu_mask);
++ set_ioapic_affinity_irq(irq, cpu_mask);
++}
++#endif
++#endif
++
++static int ioapic_retrigger(unsigned int irq)
++{
++ send_IPI_self(IO_APIC_VECTOR(irq));
++
++ return 1;
++}
++
++/*
++ * Level and edge triggered IO-APIC interrupts need different handling,
++ * so we use two separate IRQ descriptors. Edge triggered IRQs can be
++ * handled with the level-triggered descriptor, but that one has slightly
++ * more overhead. Level-triggered interrupts cannot be handled with the
++ * edge-triggered handler, without risking IRQ storms and other ugly
++ * races.
++ */
++static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
++ .typename = "IO-APIC-edge",
++ .startup = startup_edge_ioapic,
++ .shutdown = shutdown_edge_ioapic,
++ .enable = enable_edge_ioapic,
++ .disable = disable_edge_ioapic,
++ .ack = ack_edge_ioapic,
++ .end = end_edge_ioapic,
++#ifdef CONFIG_SMP
++ .set_affinity = set_ioapic_affinity,
++#endif
++ .retrigger = ioapic_retrigger,
++};
++
++static struct hw_interrupt_type ioapic_level_type __read_mostly = {
++ .typename = "IO-APIC-level",
++ .startup = startup_level_ioapic,
++ .shutdown = shutdown_level_ioapic,
++ .enable = enable_level_ioapic,
++ .disable = disable_level_ioapic,
++ .ack = mask_and_ack_level_ioapic,
++ .end = end_level_ioapic,
++#ifdef CONFIG_SMP
++ .set_affinity = set_ioapic_affinity,
++#endif
++ .retrigger = ioapic_retrigger,
++};
++#endif /* !CONFIG_XEN */
++
++static inline void init_IO_APIC_traps(void)
++{
++ int irq;
++
++ /*
++ * NOTE! The local APIC isn't very good at handling
++ * multiple interrupts at the same interrupt level.
++ * As the interrupt level is determined by taking the
++ * vector number and shifting that right by 4, we
++ * want to spread these out a bit so that they don't
++ * all fall in the same interrupt level.
++ *
++ * Also, we've got to be careful not to trash gate
++ * 0x80, because int 0x80 is hm, kind of importantish. ;)
++ */
++ for (irq = 0; irq < NR_IRQS ; irq++) {
++ int tmp = irq;
++ if (use_pci_vector()) {
++ if (!platform_legacy_irq(tmp))
++ if ((tmp = vector_to_irq(tmp)) == -1)
++ continue;
++ }
++ if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
++ /*
++ * Hmm.. We don't have an entry for this,
++ * so default to an old-fashioned 8259
++ * interrupt if we can..
++ */
++ if (irq < 16)
++ make_8259A_irq(irq);
++#ifndef CONFIG_XEN
++ else
++ /* Strange. Oh, well.. */
++ irq_desc[irq].chip = &no_irq_type;
++#endif
++ }
++ }
++}
++
++#ifndef CONFIG_XEN
++static void enable_lapic_irq (unsigned int irq)
++{
++ unsigned long v;
++
++ v = apic_read(APIC_LVT0);
++ apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
++}
++
++static void disable_lapic_irq (unsigned int irq)
++{
++ unsigned long v;
++
++ v = apic_read(APIC_LVT0);
++ apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
++}
++
++static void ack_lapic_irq (unsigned int irq)
++{
++ ack_APIC_irq();
++}
++
++static void end_lapic_irq (unsigned int i) { /* nothing */ }
++
++static struct hw_interrupt_type lapic_irq_type __read_mostly = {
++ .typename = "local-APIC-edge",
++ .startup = NULL, /* startup_irq() not used for IRQ0 */
++ .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
++ .enable = enable_lapic_irq,
++ .disable = disable_lapic_irq,
++ .ack = ack_lapic_irq,
++ .end = end_lapic_irq
++};
++
++static void setup_nmi (void)
++{
++ /*
++ * Dirty trick to enable the NMI watchdog ...
++ * We put the 8259A master into AEOI mode and
++ * unmask LVT0 as NMI on all local APICs.
++ *
++ * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
++ * is from Maciej W. Rozycki - so we do not have to EOI from
++ * the NMI handler or the timer interrupt.
++ */
++ apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
++
++ on_each_cpu(enable_NMI_through_LVT0, NULL, 1, 1);
++
++ apic_printk(APIC_VERBOSE, " done.\n");
++}
++
++/*
++ * This looks a bit hackish but it's about the only way of sending
++ * a few INTA cycles to 8259As and any associated glue logic. ICR does
++ * not support the ExtINT mode, unfortunately. We need to send these
++ * cycles as some i82489DX-based boards have glue logic that keeps the
++ * 8259A interrupt line asserted until INTA. --macro
++ */
++static inline void unlock_ExtINT_logic(void)
++{
++ int apic, pin, i;
++ struct IO_APIC_route_entry entry0, entry1;
++ unsigned char save_control, save_freq_select;
++ unsigned long flags;
++
++ pin = find_isa_irq_pin(8, mp_INT);
++ apic = find_isa_irq_apic(8, mp_INT);
++ if (pin == -1)
++ return;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
++ *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ clear_IO_APIC_pin(apic, pin);
++
++ memset(&entry1, 0, sizeof(entry1));
++
++ entry1.dest_mode = 0; /* physical delivery */
++ entry1.mask = 0; /* unmask IRQ now */
++ entry1.dest.physical.physical_dest = hard_smp_processor_id();
++ entry1.delivery_mode = dest_ExtINT;
++ entry1.polarity = entry0.polarity;
++ entry1.trigger = 0;
++ entry1.vector = 0;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
++ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ save_control = CMOS_READ(RTC_CONTROL);
++ save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
++ CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
++ RTC_FREQ_SELECT);
++ CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
++
++ i = 100;
++ while (i-- > 0) {
++ mdelay(10);
++ if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
++ i -= 10;
++ }
++
++ CMOS_WRITE(save_control, RTC_CONTROL);
++ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
++ clear_IO_APIC_pin(apic, pin);
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
++ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++int timer_uses_ioapic_pin_0;
++
++/*
++ * This code may look a bit paranoid, but it's supposed to cooperate with
++ * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
++ * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
++ * fanatically on his truly buggy board.
++ */
++static inline void check_timer(void)
++{
++ int apic1, pin1, apic2, pin2;
++ int vector;
++
++ /*
++ * get/set the timer IRQ vector:
++ */
++ disable_8259A_irq(0);
++ vector = assign_irq_vector(0);
++ set_intr_gate(vector, interrupt[0]);
++
++ /*
++ * Subtle, code in do_timer_interrupt() expects an AEOI
++ * mode for the 8259A whenever interrupts are routed
++ * through I/O APICs. Also IRQ0 has to be enabled in
++ * the 8259A which implies the virtual wire has to be
++ * disabled in the local APIC.
++ */
++ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
++ init_8259A(1);
++ timer_ack = 1;
++ if (timer_over_8254 > 0)
++ enable_8259A_irq(0);
++
++ pin1 = find_isa_irq_pin(0, mp_INT);
++ apic1 = find_isa_irq_apic(0, mp_INT);
++ pin2 = ioapic_i8259.pin;
++ apic2 = ioapic_i8259.apic;
++
++ if (pin1 == 0)
++ timer_uses_ioapic_pin_0 = 1;
++
++ printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
++ vector, apic1, pin1, apic2, pin2);
++
++ if (pin1 != -1) {
++ /*
++ * Ok, does IRQ0 through the IOAPIC work?
++ */
++ unmask_IO_APIC_irq(0);
++ if (timer_irq_works()) {
++ if (nmi_watchdog == NMI_IO_APIC) {
++ disable_8259A_irq(0);
++ setup_nmi();
++ enable_8259A_irq(0);
++ }
++ if (disable_timer_pin_1 > 0)
++ clear_IO_APIC_pin(0, pin1);
++ return;
++ }
++ clear_IO_APIC_pin(apic1, pin1);
++ printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to "
++ "IO-APIC\n");
++ }
++
++ printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... ");
++ if (pin2 != -1) {
++ printk("\n..... (found pin %d) ...", pin2);
++ /*
++ * legacy devices should be connected to IO APIC #0
++ */
++ setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
++ if (timer_irq_works()) {
++ printk("works.\n");
++ if (pin1 != -1)
++ replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
++ else
++ add_pin_to_irq(0, apic2, pin2);
++ if (nmi_watchdog == NMI_IO_APIC) {
++ setup_nmi();
++ }
++ return;
++ }
++ /*
++ * Cleanup, just in case ...
++ */
++ clear_IO_APIC_pin(apic2, pin2);
++ }
++ printk(" failed.\n");
++
++ if (nmi_watchdog == NMI_IO_APIC) {
++ printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
++ nmi_watchdog = 0;
++ }
++
++ printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
++
++ disable_8259A_irq(0);
++ irq_desc[0].chip = &lapic_irq_type;
++ apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
++ enable_8259A_irq(0);
++
++ if (timer_irq_works()) {
++ printk(" works.\n");
++ return;
++ }
++ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
++ printk(" failed.\n");
++
++ printk(KERN_INFO "...trying to set up timer as ExtINT IRQ...");
++
++ timer_ack = 0;
++ init_8259A(0);
++ make_8259A_irq(0);
++ apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
++
++ unlock_ExtINT_logic();
++
++ if (timer_irq_works()) {
++ printk(" works.\n");
++ return;
++ }
++ printk(" failed :(.\n");
++ panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
++ "report. Then try booting with the 'noapic' option");
++}
++#else
++int timer_uses_ioapic_pin_0 = 0;
++#define check_timer() ((void)0)
++#endif
++
++/*
++ *
++ * IRQ's that are handled by the PIC in the MPS IOAPIC case.
++ * - IRQ2 is the cascade IRQ, and cannot be an io-apic IRQ.
++ * Linux doesn't really care, as it's not actually used
++ * for any interrupt handling anyway.
++ */
++#define PIC_IRQS (1 << PIC_CASCADE_IR)
++
++void __init setup_IO_APIC(void)
++{
++ enable_IO_APIC();
++
++ if (acpi_ioapic)
++ io_apic_irqs = ~0; /* all IRQs go through IOAPIC */
++ else
++ io_apic_irqs = ~PIC_IRQS;
++
++ printk("ENABLING IO-APIC IRQs\n");
++
++ /*
++ * Set up IO-APIC IRQ routing.
++ */
++ if (!acpi_ioapic)
++ setup_ioapic_ids_from_mpc();
++#ifndef CONFIG_XEN
++ sync_Arb_IDs();
++#endif
++ setup_IO_APIC_irqs();
++ init_IO_APIC_traps();
++ check_timer();
++ if (!acpi_ioapic)
++ print_IO_APIC();
++}
++
++static int __init setup_disable_8254_timer(char *s)
++{
++ timer_over_8254 = -1;
++ return 1;
++}
++static int __init setup_enable_8254_timer(char *s)
++{
++ timer_over_8254 = 2;
++ return 1;
++}
++
++__setup("disable_8254_timer", setup_disable_8254_timer);
++__setup("enable_8254_timer", setup_enable_8254_timer);
++
++/*
++ * Called after all the initialization is done. If we didn't find any
++ * APIC bugs then we can allow the modify fast path
++ */
++
++static int __init io_apic_bug_finalize(void)
++{
++ if(sis_apic_bug == -1)
++ sis_apic_bug = 0;
++ if (is_initial_xendomain()) {
++ struct xen_platform_op op = { .cmd = XENPF_platform_quirk };
++ op.u.platform_quirk.quirk_id = sis_apic_bug ?
++ QUIRK_IOAPIC_BAD_REGSEL : QUIRK_IOAPIC_GOOD_REGSEL;
++ VOID(HYPERVISOR_platform_op(&op));
++ }
++ return 0;
++}
++
++late_initcall(io_apic_bug_finalize);
++
++struct sysfs_ioapic_data {
++ struct sys_device dev;
++ struct IO_APIC_route_entry entry[0];
++};
++static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
++
++static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
++{
++ struct IO_APIC_route_entry *entry;
++ struct sysfs_ioapic_data *data;
++ unsigned long flags;
++ int i;
++
++ data = container_of(dev, struct sysfs_ioapic_data, dev);
++ entry = data->entry;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
++ *(((int *)entry) + 1) = io_apic_read(dev->id, 0x11 + 2 * i);
++ *(((int *)entry) + 0) = io_apic_read(dev->id, 0x10 + 2 * i);
++ }
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return 0;
++}
++
++static int ioapic_resume(struct sys_device *dev)
++{
++ struct IO_APIC_route_entry *entry;
++ struct sysfs_ioapic_data *data;
++ unsigned long flags;
++ union IO_APIC_reg_00 reg_00;
++ int i;
++
++ data = container_of(dev, struct sysfs_ioapic_data, dev);
++ entry = data->entry;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(dev->id, 0);
++ if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
++ reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
++ io_apic_write(dev->id, 0, reg_00.raw);
++ }
++ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
++ io_apic_write(dev->id, 0x11+2*i, *(((int *)entry)+1));
++ io_apic_write(dev->id, 0x10+2*i, *(((int *)entry)+0));
++ }
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return 0;
++}
++
++static struct sysdev_class ioapic_sysdev_class = {
++ set_kset_name("ioapic"),
++ .suspend = ioapic_suspend,
++ .resume = ioapic_resume,
++};
++
++static int __init ioapic_init_sysfs(void)
++{
++ struct sys_device * dev;
++ int i, size, error = 0;
++
++ error = sysdev_class_register(&ioapic_sysdev_class);
++ if (error)
++ return error;
++
++ for (i = 0; i < nr_ioapics; i++ ) {
++ size = sizeof(struct sys_device) + nr_ioapic_registers[i]
++ * sizeof(struct IO_APIC_route_entry);
++ mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
++ if (!mp_ioapic_data[i]) {
++ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
++ continue;
++ }
++ memset(mp_ioapic_data[i], 0, size);
++ dev = &mp_ioapic_data[i]->dev;
++ dev->id = i;
++ dev->cls = &ioapic_sysdev_class;
++ error = sysdev_register(dev);
++ if (error) {
++ kfree(mp_ioapic_data[i]);
++ mp_ioapic_data[i] = NULL;
++ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
++ continue;
++ }
++ }
++
++ return 0;
++}
++
++device_initcall(ioapic_init_sysfs);
++
++/* --------------------------------------------------------------------------
++ ACPI-based IOAPIC Configuration
++ -------------------------------------------------------------------------- */
++
++#ifdef CONFIG_ACPI
++
++int __init io_apic_get_unique_id (int ioapic, int apic_id)
++{
++#ifndef CONFIG_XEN
++ union IO_APIC_reg_00 reg_00;
++ static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
++ physid_mask_t tmp;
++ unsigned long flags;
++ int i = 0;
++
++ /*
++ * The P4 platform supports up to 256 APIC IDs on two separate APIC
++ * buses (one for LAPICs, one for IOAPICs), where predecessors only
++ * support up to 16 on one shared APIC bus.
++ *
++ * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
++ * advantage of new APIC bus architecture.
++ */
++
++ if (physids_empty(apic_id_map))
++ apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(ioapic, 0);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ if (apic_id >= get_physical_broadcast()) {
++ printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
++ "%d\n", ioapic, apic_id, reg_00.bits.ID);
++ apic_id = reg_00.bits.ID;
++ }
++
++ /*
++ * Every APIC in a system must have a unique ID or we get lots of nice
++ * 'stuck on smp_invalidate_needed IPI wait' messages.
++ */
++ if (check_apicid_used(apic_id_map, apic_id)) {
++
++ for (i = 0; i < get_physical_broadcast(); i++) {
++ if (!check_apicid_used(apic_id_map, i))
++ break;
++ }
++
++ if (i == get_physical_broadcast())
++ panic("Max apic_id exceeded!\n");
++
++ printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
++ "trying %d\n", ioapic, apic_id, i);
++
++ apic_id = i;
++ }
++
++ tmp = apicid_to_cpu_present(apic_id);
++ physids_or(apic_id_map, apic_id_map, tmp);
++
++ if (reg_00.bits.ID != apic_id) {
++ reg_00.bits.ID = apic_id;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(ioapic, 0, reg_00.raw);
++ reg_00.raw = io_apic_read(ioapic, 0);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ /* Sanity check */
++ if (reg_00.bits.ID != apic_id) {
++ printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
++ return -1;
++ }
++ }
++
++ apic_printk(APIC_VERBOSE, KERN_INFO
++ "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
++#endif /* !CONFIG_XEN */
++
++ return apic_id;
++}
++
++
++int __init io_apic_get_version (int ioapic)
++{
++ union IO_APIC_reg_01 reg_01;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_01.raw = io_apic_read(ioapic, 1);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return reg_01.bits.version;
++}
++
++
++int __init io_apic_get_redir_entries (int ioapic)
++{
++ union IO_APIC_reg_01 reg_01;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_01.raw = io_apic_read(ioapic, 1);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return reg_01.bits.entries;
++}
++
++
++int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
++{
++ struct IO_APIC_route_entry entry;
++ unsigned long flags;
++
++ if (!IO_APIC_IRQ(irq)) {
++ printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
++ ioapic);
++ return -EINVAL;
++ }
++
++ /*
++ * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
++ * Note that we mask (disable) IRQs now -- these get enabled when the
++ * corresponding device driver registers for this IRQ.
++ */
++
++ memset(&entry,0,sizeof(entry));
++
++ entry.delivery_mode = INT_DELIVERY_MODE;
++ entry.dest_mode = INT_DEST_MODE;
++ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++ entry.trigger = edge_level;
++ entry.polarity = active_high_low;
++ entry.mask = 1;
++
++ /*
++ * IRQs < 16 are already in the irq_2_pin[] map
++ */
++ if (irq >= 16)
++ add_pin_to_irq(irq, ioapic, pin);
++
++ entry.vector = assign_irq_vector(irq);
++
++ apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry "
++ "(%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i)\n", ioapic,
++ mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
++ edge_level, active_high_low);
++
++ ioapic_register_intr(irq, entry.vector, edge_level);
++
++ if (!ioapic && (irq < 16))
++ disable_8259A_irq(irq);
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
++ io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
++ set_native_irq_info(use_pci_vector() ? entry.vector : irq, TARGET_CPUS);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return 0;
++}
++
++#endif /* CONFIG_ACPI */
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/ioport-xen.c linux-2.6.18-xen-3.2.0/arch/i386/kernel/ioport-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/ioport-xen.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/ioport-xen.c 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,123 @@
++/*
++ * linux/arch/i386/kernel/ioport.c
++ *
++ * This contains the io-permission bitmap code - written by obz, with changes
++ * by Linus.
++ */
++
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/capability.h>
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <linux/ioport.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/stddef.h>
++#include <linux/slab.h>
++#include <linux/thread_info.h>
++#include <xen/interface/physdev.h>
++
++/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
++static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value)
++{
++ unsigned long mask;
++ unsigned long *bitmap_base = bitmap + (base / BITS_PER_LONG);
++ unsigned int low_index = base & (BITS_PER_LONG-1);
++ int length = low_index + extent;
++
++ if (low_index != 0) {
++ mask = (~0UL << low_index);
++ if (length < BITS_PER_LONG)
++ mask &= ~(~0UL << length);
++ if (new_value)
++ *bitmap_base++ |= mask;
++ else
++ *bitmap_base++ &= ~mask;
++ length -= BITS_PER_LONG;
++ }
++
++ mask = (new_value ? ~0UL : 0UL);
++ while (length >= BITS_PER_LONG) {
++ *bitmap_base++ = mask;
++ length -= BITS_PER_LONG;
++ }
++
++ if (length > 0) {
++ mask = ~(~0UL << length);
++ if (new_value)
++ *bitmap_base++ |= mask;
++ else
++ *bitmap_base++ &= ~mask;
++ }
++}
++
++
++/*
++ * this changes the io permissions bitmap in the current task.
++ */
++asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
++{
++ struct thread_struct * t = &current->thread;
++ unsigned long *bitmap;
++ struct physdev_set_iobitmap set_iobitmap;
++
++ if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
++ return -EINVAL;
++ if (turn_on && !capable(CAP_SYS_RAWIO))
++ return -EPERM;
++
++ /*
++ * If it's the first ioperm() call in this thread's lifetime, set the
++ * IO bitmap up. ioperm() is much less timing critical than clone(),
++ * which is why we delay this operation until now:
++ */
++ if (!t->io_bitmap_ptr) {
++ bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
++ if (!bitmap)
++ return -ENOMEM;
++
++ memset(bitmap, 0xff, IO_BITMAP_BYTES);
++ t->io_bitmap_ptr = bitmap;
++ set_thread_flag(TIF_IO_BITMAP);
++
++ set_xen_guest_handle(set_iobitmap.bitmap, (char *)bitmap);
++ set_iobitmap.nr_ports = IO_BITMAP_BITS;
++ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap,
++ &set_iobitmap));
++ }
++
++ set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
++
++ return 0;
++}
++
++/*
++ * sys_iopl has to be used when you want to access the IO ports
++ * beyond the 0x3ff range: to get the full 65536 ports bitmapped
++ * you'd need 8kB of bitmaps/process, which is a bit excessive.
++ *
++ * Here we just change the eflags value on the stack: we allow
++ * only the super-user to do it. This depends on the stack-layout
++ * on system-call entry - see also fork() and the signal handling
++ * code.
++ */
++
++asmlinkage long sys_iopl(unsigned long unused)
++{
++ volatile struct pt_regs * regs = (struct pt_regs *) &unused;
++ unsigned int level = regs->ebx;
++ struct thread_struct *t = &current->thread;
++ unsigned int old = (t->iopl >> 12) & 3;
++
++ if (level > 3)
++ return -EINVAL;
++ /* Trying to gain more privileges? */
++ if (level > old) {
++ if (!capable(CAP_SYS_RAWIO))
++ return -EPERM;
++ }
++ t->iopl = level << 12;
++ set_iopl_mask(t->iopl);
++ return 0;
++}
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/irq-xen.c linux-2.6.18-xen-3.2.0/arch/i386/kernel/irq-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/irq-xen.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/irq-xen.c 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,324 @@
++/*
++ * linux/arch/i386/kernel/irq.c
++ *
++ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
++ *
++ * This file contains the lowest level x86-specific interrupt
++ * entry, irq-stacks and irq statistics code. All the remaining
++ * irq logic is done by the generic kernel/irq/ code and
++ * by the x86-specific irq controller code. (e.g. i8259.c and
++ * io_apic.c.)
++ */
++
++#include <asm/uaccess.h>
++#include <linux/module.h>
++#include <linux/seq_file.h>
++#include <linux/interrupt.h>
++#include <linux/kernel_stat.h>
++#include <linux/notifier.h>
++#include <linux/cpu.h>
++#include <linux/delay.h>
++
++DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
++EXPORT_PER_CPU_SYMBOL(irq_stat);
++
++#ifndef CONFIG_X86_LOCAL_APIC
++/*
++ * 'what should we do if we get a hw irq event on an illegal vector'.
++ * Each architecture has to answer this itself.
++ */
++void ack_bad_irq(unsigned int irq)
++{
++ printk("unexpected IRQ trap at vector %02x\n", irq);
++}
++#endif
++
++#ifdef CONFIG_4KSTACKS
++/*
++ * per-CPU IRQ handling contexts (thread information and stack)
++ */
++union irq_ctx {
++ struct thread_info tinfo;
++ u32 stack[THREAD_SIZE/sizeof(u32)];
++};
++
++static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
++static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
++#endif
++
++/*
++ * do_IRQ handles all normal device IRQ's (the special
++ * SMP cross-CPU interrupts have their own specific
++ * handlers).
++ */
++fastcall unsigned int do_IRQ(struct pt_regs *regs)
++{
++ /* high bit used in ret_from_ code */
++ int irq = ~regs->orig_eax;
++#ifdef CONFIG_4KSTACKS
++ union irq_ctx *curctx, *irqctx;
++ u32 *isp;
++#endif
++
++ if (unlikely((unsigned)irq >= NR_IRQS)) {
++ printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
++ __FUNCTION__, irq);
++ BUG();
++ }
++
++ irq_enter();
++#ifdef CONFIG_DEBUG_STACKOVERFLOW
++ /* Debugging check for stack overflow: is there less than 1KB free? */
++ {
++ long esp;
++
++ __asm__ __volatile__("andl %%esp,%0" :
++ "=r" (esp) : "0" (THREAD_SIZE - 1));
++ if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) {
++ printk("do_IRQ: stack overflow: %ld\n",
++ esp - sizeof(struct thread_info));
++ dump_stack();
++ }
++ }
++#endif
++
++#ifdef CONFIG_4KSTACKS
++
++ curctx = (union irq_ctx *) current_thread_info();
++ irqctx = hardirq_ctx[smp_processor_id()];
++
++ /*
++ * this is where we switch to the IRQ stack. However, if we are
++ * already using the IRQ stack (because we interrupted a hardirq
++ * handler) we can't do that and just have to keep using the
++ * current stack (which is the irq stack already after all)
++ */
++ if (curctx != irqctx) {
++ int arg1, arg2, ebx;
++
++ /* build the stack frame on the IRQ stack */
++ isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
++ irqctx->tinfo.task = curctx->tinfo.task;
++ irqctx->tinfo.previous_esp = current_stack_pointer;
++
++ /*
++ * Copy the softirq bits in preempt_count so that the
++ * softirq checks work in the hardirq context.
++ */
++ irqctx->tinfo.preempt_count =
++ (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
++ (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
++
++ asm volatile(
++ " xchgl %%ebx,%%esp \n"
++ " call __do_IRQ \n"
++ " movl %%ebx,%%esp \n"
++ : "=a" (arg1), "=d" (arg2), "=b" (ebx)
++ : "0" (irq), "1" (regs), "2" (isp)
++ : "memory", "cc", "ecx"
++ );
++ } else
++#endif
++ __do_IRQ(irq, regs);
++
++ irq_exit();
++
++ return 1;
++}
++
++#ifdef CONFIG_4KSTACKS
++
++/*
++ * These should really be __section__(".bss.page_aligned") as well, but
++ * gcc 3.0 and earlier don't handle that correctly.
++ */
++static char softirq_stack[NR_CPUS * THREAD_SIZE]
++ __attribute__((__aligned__(THREAD_SIZE)));
++
++static char hardirq_stack[NR_CPUS * THREAD_SIZE]
++ __attribute__((__aligned__(THREAD_SIZE)));
++
++/*
++ * allocate per-cpu stacks for hardirq and for softirq processing
++ */
++void irq_ctx_init(int cpu)
++{
++ union irq_ctx *irqctx;
++
++ if (hardirq_ctx[cpu])
++ return;
++
++ irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
++ irqctx->tinfo.task = NULL;
++ irqctx->tinfo.exec_domain = NULL;
++ irqctx->tinfo.cpu = cpu;
++ irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
++ irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
++
++ hardirq_ctx[cpu] = irqctx;
++
++ irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
++ irqctx->tinfo.task = NULL;
++ irqctx->tinfo.exec_domain = NULL;
++ irqctx->tinfo.cpu = cpu;
++ irqctx->tinfo.preempt_count = 0;
++ irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
++
++ softirq_ctx[cpu] = irqctx;
++
++ printk("CPU %u irqstacks, hard=%p soft=%p\n",
++ cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
++}
++
++void irq_ctx_exit(int cpu)
++{
++ hardirq_ctx[cpu] = NULL;
++}
++
++extern asmlinkage void __do_softirq(void);
++
++asmlinkage void do_softirq(void)
++{
++ unsigned long flags;
++ struct thread_info *curctx;
++ union irq_ctx *irqctx;
++ u32 *isp;
++
++ if (in_interrupt())
++ return;
++
++ local_irq_save(flags);
++
++ if (local_softirq_pending()) {
++ curctx = current_thread_info();
++ irqctx = softirq_ctx[smp_processor_id()];
++ irqctx->tinfo.task = curctx->task;
++ irqctx->tinfo.previous_esp = current_stack_pointer;
++
++ /* build the stack frame on the softirq stack */
++ isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
++
++ asm volatile(
++ " xchgl %%ebx,%%esp \n"
++ " call __do_softirq \n"
++ " movl %%ebx,%%esp \n"
++ : "=b"(isp)
++ : "0"(isp)
++ : "memory", "cc", "edx", "ecx", "eax"
++ );
++ /*
++ * Shouldn't happen, we returned above if in_interrupt():
++ */
++ WARN_ON_ONCE(softirq_count());
++ }
++
++ local_irq_restore(flags);
++}
++
++EXPORT_SYMBOL(do_softirq);
++#endif
++
++/*
++ * Interrupt statistics:
++ */
++
++atomic_t irq_err_count;
++
++/*
++ * /proc/interrupts printing:
++ */
++
++int show_interrupts(struct seq_file *p, void *v)
++{
++ int i = *(loff_t *) v, j;
++ struct irqaction * action;
++ unsigned long flags;
++
++ if (i == 0) {
++ seq_printf(p, " ");
++ for_each_online_cpu(j)
++ seq_printf(p, "CPU%-8d",j);
++ seq_putc(p, '\n');
++ }
++
++ if (i < NR_IRQS) {
++ spin_lock_irqsave(&irq_desc[i].lock, flags);
++ action = irq_desc[i].action;
++ if (!action)
++ goto skip;
++ seq_printf(p, "%3d: ",i);
++#ifndef CONFIG_SMP
++ seq_printf(p, "%10u ", kstat_irqs(i));
++#else
++ for_each_online_cpu(j)
++ seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
++#endif
++ seq_printf(p, " %14s", irq_desc[i].chip->typename);
++ seq_printf(p, " %s", action->name);
++
++ for (action=action->next; action; action = action->next)
++ seq_printf(p, ", %s", action->name);
++
++ seq_putc(p, '\n');
++skip:
++ spin_unlock_irqrestore(&irq_desc[i].lock, flags);
++ } else if (i == NR_IRQS) {
++ seq_printf(p, "NMI: ");
++ for_each_online_cpu(j)
++ seq_printf(p, "%10u ", nmi_count(j));
++ seq_putc(p, '\n');
++#ifdef CONFIG_X86_LOCAL_APIC
++ seq_printf(p, "LOC: ");
++ for_each_online_cpu(j)
++ seq_printf(p, "%10u ",
++ per_cpu(irq_stat,j).apic_timer_irqs);
++ seq_putc(p, '\n');
++#endif
++ seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
++#if defined(CONFIG_X86_IO_APIC)
++ seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
++#endif
++ }
++ return 0;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++
++void fixup_irqs(cpumask_t map)
++{
++ unsigned int irq;
++ static int warned;
++
++ for (irq = 0; irq < NR_IRQS; irq++) {
++ cpumask_t mask;
++ if (irq == 2)
++ continue;
++
++ cpus_and(mask, irq_desc[irq].affinity, map);
++ if (any_online_cpu(mask) == NR_CPUS) {
++ /*printk("Breaking affinity for irq %i\n", irq);*/
++ mask = map;
++ }
++ if (irq_desc[irq].chip->set_affinity)
++ irq_desc[irq].chip->set_affinity(irq, mask);
++ else if (irq_desc[irq].action && !(warned++))
++ printk("Cannot set affinity for irq %i\n", irq);
++ }
++
++#if 0
++ barrier();
++ /* Ingo Molnar says: "after the IO-APIC masks have been redirected
++ [note the nop - the interrupt-enable boundary on x86 is two
++ instructions from sti] - to flush out pending hardirqs and
++ IPIs. After this point nothing is supposed to reach this CPU." */
++ __asm__ __volatile__("sti; nop; cli");
++ barrier();
++#else
++ /* That doesn't seem sufficient. Give it 1ms. */
++ local_irq_enable();
++ mdelay(1);
++ local_irq_disable();
++#endif
++}
++#endif
++
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/ldt-xen.c linux-2.6.18-xen-3.2.0/arch/i386/kernel/ldt-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/ldt-xen.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/ldt-xen.c 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,270 @@
++/*
++ * linux/kernel/ldt.c
++ *
++ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
++ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
++ */
++
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/vmalloc.h>
++#include <linux/slab.h>
++
++#include <asm/uaccess.h>
++#include <asm/system.h>
++#include <asm/ldt.h>
++#include <asm/desc.h>
++#include <asm/mmu_context.h>
++
++#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
++static void flush_ldt(void *null)
++{
++ if (current->active_mm)
++ load_LDT(&current->active_mm->context);
++}
++#endif
++
++static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
++{
++ void *oldldt;
++ void *newldt;
++ int oldsize;
++
++ if (mincount <= pc->size)
++ return 0;
++ oldsize = pc->size;
++ mincount = (mincount+511)&(~511);
++ if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
++ newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
++ else
++ newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
++
++ if (!newldt)
++ return -ENOMEM;
++
++ if (oldsize)
++ memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
++ oldldt = pc->ldt;
++ memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
++ pc->ldt = newldt;
++ wmb();
++ pc->size = mincount;
++ wmb();
++
++ if (reload) {
++#ifdef CONFIG_SMP
++ cpumask_t mask;
++ preempt_disable();
++#endif
++ make_pages_readonly(
++ pc->ldt,
++ (pc->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ load_LDT(pc);
++#ifdef CONFIG_SMP
++ mask = cpumask_of_cpu(smp_processor_id());
++ if (!cpus_equal(current->mm->cpu_vm_mask, mask))
++ smp_call_function(flush_ldt, NULL, 1, 1);
++ preempt_enable();
++#endif
++ }
++ if (oldsize) {
++ make_pages_writable(
++ oldldt,
++ (oldsize * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
++ vfree(oldldt);
++ else
++ kfree(oldldt);
++ }
++ return 0;
++}
++
++static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
++{
++ int err = alloc_ldt(new, old->size, 0);
++ if (err < 0)
++ return err;
++ memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
++ make_pages_readonly(
++ new->ldt,
++ (new->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ return 0;
++}
++
++/*
++ * we do not have to muck with descriptors here, that is
++ * done in switch_mm() as needed.
++ */
++int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
++{
++ struct mm_struct * old_mm;
++ int retval = 0;
++
++ init_MUTEX(&mm->context.sem);
++ mm->context.size = 0;
++ mm->context.has_foreign_mappings = 0;
++ old_mm = current->mm;
++ if (old_mm && old_mm->context.size > 0) {
++ down(&old_mm->context.sem);
++ retval = copy_ldt(&mm->context, &old_mm->context);
++ up(&old_mm->context.sem);
++ }
++ return retval;
++}
++
++/*
++ * No need to lock the MM as we are the last user
++ */
++void destroy_context(struct mm_struct *mm)
++{
++ if (mm->context.size) {
++ if (mm == current->active_mm)
++ clear_LDT();
++ make_pages_writable(
++ mm->context.ldt,
++ (mm->context.size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
++ vfree(mm->context.ldt);
++ else
++ kfree(mm->context.ldt);
++ mm->context.size = 0;
++ }
++}
++
++static int read_ldt(void __user * ptr, unsigned long bytecount)
++{
++ int err;
++ unsigned long size;
++ struct mm_struct * mm = current->mm;
++
++ if (!mm->context.size)
++ return 0;
++ if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
++ bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
++
++ down(&mm->context.sem);
++ size = mm->context.size*LDT_ENTRY_SIZE;
++ if (size > bytecount)
++ size = bytecount;
++
++ err = 0;
++ if (copy_to_user(ptr, mm->context.ldt, size))
++ err = -EFAULT;
++ up(&mm->context.sem);
++ if (err < 0)
++ goto error_return;
++ if (size != bytecount) {
++ /* zero-fill the rest */
++ if (clear_user(ptr+size, bytecount-size) != 0) {
++ err = -EFAULT;
++ goto error_return;
++ }
++ }
++ return bytecount;
++error_return:
++ return err;
++}
++
++static int read_default_ldt(void __user * ptr, unsigned long bytecount)
++{
++ int err;
++ unsigned long size;
++ void *address;
++
++ err = 0;
++ address = &default_ldt[0];
++ size = 5*sizeof(struct desc_struct);
++ if (size > bytecount)
++ size = bytecount;
++
++ err = size;
++ if (copy_to_user(ptr, address, size))
++ err = -EFAULT;
++
++ return err;
++}
++
++static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
++{
++ struct mm_struct * mm = current->mm;
++ __u32 entry_1, entry_2;
++ int error;
++ struct user_desc ldt_info;
++
++ error = -EINVAL;
++ if (bytecount != sizeof(ldt_info))
++ goto out;
++ error = -EFAULT;
++ if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
++ goto out;
++
++ error = -EINVAL;
++ if (ldt_info.entry_number >= LDT_ENTRIES)
++ goto out;
++ if (ldt_info.contents == 3) {
++ if (oldmode)
++ goto out;
++ if (ldt_info.seg_not_present == 0)
++ goto out;
++ }
++
++ down(&mm->context.sem);
++ if (ldt_info.entry_number >= mm->context.size) {
++ error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
++ if (error < 0)
++ goto out_unlock;
++ }
++
++ /* Allow LDTs to be cleared by the user. */
++ if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
++ if (oldmode || LDT_empty(&ldt_info)) {
++ entry_1 = 0;
++ entry_2 = 0;
++ goto install;
++ }
++ }
++
++ entry_1 = LDT_entry_a(&ldt_info);
++ entry_2 = LDT_entry_b(&ldt_info);
++ if (oldmode)
++ entry_2 &= ~(1 << 20);
++
++ /* Install the new entry ... */
++install:
++ error = write_ldt_entry(mm->context.ldt, ldt_info.entry_number,
++ entry_1, entry_2);
++
++out_unlock:
++ up(&mm->context.sem);
++out:
++ return error;
++}
++
++asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
++{
++ int ret = -ENOSYS;
++
++ switch (func) {
++ case 0:
++ ret = read_ldt(ptr, bytecount);
++ break;
++ case 1:
++ ret = write_ldt(ptr, bytecount, 1);
++ break;
++ case 2:
++ ret = read_default_ldt(ptr, bytecount);
++ break;
++ case 0x11:
++ ret = write_ldt(ptr, bytecount, 0);
++ break;
++ }
++ return ret;
++}
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/machine_kexec.c linux-2.6.18-xen-3.2.0/arch/i386/kernel/machine_kexec.c
+--- linux-2.6.18.8/arch/i386/kernel/machine_kexec.c 2007-02-23 15:52:30.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/machine_kexec.c 2008-02-15 16:21:49.000000000 -0800
+@@ -19,123 +19,52 @@
+ #include <asm/desc.h>
+ #include <asm/system.h>
+
+-#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
+-
+-#define L0_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
+-#define L1_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
+-#define L2_ATTR (_PAGE_PRESENT)
+-
+-#define LEVEL0_SIZE (1UL << 12UL)
+-
+-#ifndef CONFIG_X86_PAE
+-#define LEVEL1_SIZE (1UL << 22UL)
+-static u32 pgtable_level1[1024] PAGE_ALIGNED;
+-
+-static void identity_map_page(unsigned long address)
+-{
+- unsigned long level1_index, level2_index;
+- u32 *pgtable_level2;
+-
+- /* Find the current page table */
+- pgtable_level2 = __va(read_cr3());
+-
+- /* Find the indexes of the physical address to identity map */
+- level1_index = (address % LEVEL1_SIZE)/LEVEL0_SIZE;
+- level2_index = address / LEVEL1_SIZE;
+-
+- /* Identity map the page table entry */
+- pgtable_level1[level1_index] = address | L0_ATTR;
+- pgtable_level2[level2_index] = __pa(pgtable_level1) | L1_ATTR;
+-
+- /* Flush the tlb so the new mapping takes effect.
+- * Global tlb entries are not flushed but that is not an issue.
+- */
+- load_cr3(pgtable_level2);
+-}
+-
+-#else
+-#define LEVEL1_SIZE (1UL << 21UL)
+-#define LEVEL2_SIZE (1UL << 30UL)
+-static u64 pgtable_level1[512] PAGE_ALIGNED;
+-static u64 pgtable_level2[512] PAGE_ALIGNED;
+-
+-static void identity_map_page(unsigned long address)
+-{
+- unsigned long level1_index, level2_index, level3_index;
+- u64 *pgtable_level3;
+-
+- /* Find the current page table */
+- pgtable_level3 = __va(read_cr3());
++#ifdef CONFIG_XEN
++#include <xen/interface/kexec.h>
++#endif
+
+- /* Find the indexes of the physical address to identity map */
+- level1_index = (address % LEVEL1_SIZE)/LEVEL0_SIZE;
+- level2_index = (address % LEVEL2_SIZE)/LEVEL1_SIZE;
+- level3_index = address / LEVEL2_SIZE;
+-
+- /* Identity map the page table entry */
+- pgtable_level1[level1_index] = address | L0_ATTR;
+- pgtable_level2[level2_index] = __pa(pgtable_level1) | L1_ATTR;
+- set_64bit(&pgtable_level3[level3_index],
+- __pa(pgtable_level2) | L2_ATTR);
+-
+- /* Flush the tlb so the new mapping takes effect.
+- * Global tlb entries are not flushed but that is not an issue.
+- */
+- load_cr3(pgtable_level3);
+-}
++#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
++static u32 kexec_pgd[1024] PAGE_ALIGNED;
++#ifdef CONFIG_X86_PAE
++static u32 kexec_pmd0[1024] PAGE_ALIGNED;
++static u32 kexec_pmd1[1024] PAGE_ALIGNED;
+ #endif
++static u32 kexec_pte0[1024] PAGE_ALIGNED;
++static u32 kexec_pte1[1024] PAGE_ALIGNED;
+
+-static void set_idt(void *newidt, __u16 limit)
+-{
+- struct Xgt_desc_struct curidt;
++#ifdef CONFIG_XEN
+
+- /* ia32 supports unaliged loads & stores */
+- curidt.size = limit;
+- curidt.address = (unsigned long)newidt;
++#define __ma(x) (pfn_to_mfn(__pa((x)) >> PAGE_SHIFT) << PAGE_SHIFT)
+
+- load_idt(&curidt);
+-};
++#if PAGES_NR > KEXEC_XEN_NO_PAGES
++#error PAGES_NR is greater than KEXEC_XEN_NO_PAGES - Xen support will break
++#endif
+
++#if PA_CONTROL_PAGE != 0
++#error PA_CONTROL_PAGE is non zero - Xen support will break
++#endif
+
+-static void set_gdt(void *newgdt, __u16 limit)
++void machine_kexec_setup_load_arg(xen_kexec_image_t *xki, struct kimage *image)
+ {
+- struct Xgt_desc_struct curgdt;
++ void *control_page;
+
+- /* ia32 supports unaligned loads & stores */
+- curgdt.size = limit;
+- curgdt.address = (unsigned long)newgdt;
++ memset(xki->page_list, 0, sizeof(xki->page_list));
+
+- load_gdt(&curgdt);
+-};
++ control_page = page_address(image->control_code_page);
++ memcpy(control_page, relocate_kernel, PAGE_SIZE);
+
+-static void load_segments(void)
+-{
+-#define __STR(X) #X
+-#define STR(X) __STR(X)
++ xki->page_list[PA_CONTROL_PAGE] = __ma(control_page);
++ xki->page_list[PA_PGD] = __ma(kexec_pgd);
++#ifdef CONFIG_X86_PAE
++ xki->page_list[PA_PMD_0] = __ma(kexec_pmd0);
++ xki->page_list[PA_PMD_1] = __ma(kexec_pmd1);
++#endif
++ xki->page_list[PA_PTE_0] = __ma(kexec_pte0);
++ xki->page_list[PA_PTE_1] = __ma(kexec_pte1);
+
+- __asm__ __volatile__ (
+- "\tljmp $"STR(__KERNEL_CS)",$1f\n"
+- "\t1:\n"
+- "\tmovl $"STR(__KERNEL_DS)",%%eax\n"
+- "\tmovl %%eax,%%ds\n"
+- "\tmovl %%eax,%%es\n"
+- "\tmovl %%eax,%%fs\n"
+- "\tmovl %%eax,%%gs\n"
+- "\tmovl %%eax,%%ss\n"
+- ::: "eax", "memory");
+-#undef STR
+-#undef __STR
+ }
+
+-typedef asmlinkage NORET_TYPE void (*relocate_new_kernel_t)(
+- unsigned long indirection_page,
+- unsigned long reboot_code_buffer,
+- unsigned long start_address,
+- unsigned int has_pae) ATTRIB_NORET;
+-
+-extern const unsigned char relocate_new_kernel[];
+-extern void relocate_new_kernel_end(void);
+-extern const unsigned int relocate_new_kernel_size;
++#endif /* CONFIG_XEN */
+
+ /*
+ * A architecture hook called to validate the
+@@ -163,49 +92,38 @@ void machine_kexec_cleanup(struct kimage
+ {
+ }
+
++#ifndef CONFIG_XEN
+ /*
+ * Do not allocate memory (or fail in any way) in machine_kexec().
+ * We are past the point of no return, committed to rebooting now.
+ */
+ NORET_TYPE void machine_kexec(struct kimage *image)
+ {
+- unsigned long page_list;
+- unsigned long reboot_code_buffer;
+-
+- relocate_new_kernel_t rnk;
++ unsigned long page_list[PAGES_NR];
++ void *control_page;
+
+ /* Interrupts aren't acceptable while we reboot */
+ local_irq_disable();
+
+- /* Compute some offsets */
+- reboot_code_buffer = page_to_pfn(image->control_code_page)
+- << PAGE_SHIFT;
+- page_list = image->head;
+-
+- /* Set up an identity mapping for the reboot_code_buffer */
+- identity_map_page(reboot_code_buffer);
+-
+- /* copy it out */
+- memcpy((void *)reboot_code_buffer, relocate_new_kernel,
+- relocate_new_kernel_size);
+-
+- /* The segment registers are funny things, they have both a
+- * visible and an invisible part. Whenever the visible part is
+- * set to a specific selector, the invisible part is loaded
+- * with from a table in memory. At no other time is the
+- * descriptor table in memory accessed.
+- *
+- * I take advantage of this here by force loading the
+- * segments, before I zap the gdt with an invalid value.
+- */
+- load_segments();
+- /* The gdt & idt are now invalid.
+- * If you want to load them you must set up your own idt & gdt.
+- */
+- set_gdt(phys_to_virt(0),0);
+- set_idt(phys_to_virt(0),0);
+-
+- /* now call it */
+- rnk = (relocate_new_kernel_t) reboot_code_buffer;
+- (*rnk)(page_list, reboot_code_buffer, image->start, cpu_has_pae);
++ control_page = page_address(image->control_code_page);
++ memcpy(control_page, relocate_kernel, PAGE_SIZE);
++
++ page_list[PA_CONTROL_PAGE] = __pa(control_page);
++ page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel;
++ page_list[PA_PGD] = __pa(kexec_pgd);
++ page_list[VA_PGD] = (unsigned long)kexec_pgd;
++#ifdef CONFIG_X86_PAE
++ page_list[PA_PMD_0] = __pa(kexec_pmd0);
++ page_list[VA_PMD_0] = (unsigned long)kexec_pmd0;
++ page_list[PA_PMD_1] = __pa(kexec_pmd1);
++ page_list[VA_PMD_1] = (unsigned long)kexec_pmd1;
++#endif
++ page_list[PA_PTE_0] = __pa(kexec_pte0);
++ page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
++ page_list[PA_PTE_1] = __pa(kexec_pte1);
++ page_list[VA_PTE_1] = (unsigned long)kexec_pte1;
++
++ relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
++ image->start, cpu_has_pae);
+ }
++#endif
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/microcode-xen.c linux-2.6.18-xen-3.2.0/arch/i386/kernel/microcode-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/microcode-xen.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/microcode-xen.c 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,144 @@
++/*
++ * Intel CPU Microcode Update Driver for Linux
++ *
++ * Copyright (C) 2000-2004 Tigran Aivazian
++ *
++ * This driver allows upgrading the microcode on Intel processors
++ * belonging to the IA-32 family - PentiumPro, Pentium II,
++ * Pentium III, Xeon, Pentium 4, etc.
++ *
++ * Reference: Section 8.10 of Volume III, Intel Pentium 4 Manual,
++ * Order Number 245472 or free download from:
++ *
++ * http://developer.intel.com/design/pentium4/manuals/245472.htm
++ *
++ * For more information, go to http://www.urbanmyth.org/microcode
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++//#define DEBUG /* pr_debug */
++#include <linux/capability.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/sched.h>
++#include <linux/cpumask.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/miscdevice.h>
++#include <linux/spinlock.h>
++#include <linux/mm.h>
++#include <linux/mutex.h>
++#include <linux/syscalls.h>
++
++#include <asm/msr.h>
++#include <asm/uaccess.h>
++#include <asm/processor.h>
++
++MODULE_DESCRIPTION("Intel CPU (IA-32) Microcode Update Driver");
++MODULE_AUTHOR("Tigran Aivazian <tigran@veritas.com>");
++MODULE_LICENSE("GPL");
++
++static int verbose;
++module_param(verbose, int, 0644);
++
++#define MICROCODE_VERSION "1.14a-xen"
++
++#define DEFAULT_UCODE_DATASIZE (2000) /* 2000 bytes */
++#define MC_HEADER_SIZE (sizeof (microcode_header_t)) /* 48 bytes */
++#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE) /* 2048 bytes */
++
++/* no concurrent ->write()s are allowed on /dev/cpu/microcode */
++static DEFINE_MUTEX(microcode_mutex);
++
++static int microcode_open (struct inode *unused1, struct file *unused2)
++{
++ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
++}
++
++
++static int do_microcode_update (const void __user *ubuf, size_t len)
++{
++ int err;
++ void *kbuf;
++
++ kbuf = vmalloc(len);
++ if (!kbuf)
++ return -ENOMEM;
++
++ if (copy_from_user(kbuf, ubuf, len) == 0) {
++ struct xen_platform_op op;
++
++ op.cmd = XENPF_microcode_update;
++ set_xen_guest_handle(op.u.microcode.data, kbuf);
++ op.u.microcode.length = len;
++ err = HYPERVISOR_platform_op(&op);
++ } else
++ err = -EFAULT;
++
++ vfree(kbuf);
++
++ return err;
++}
++
++static ssize_t microcode_write (struct file *file, const char __user *buf, size_t len, loff_t *ppos)
++{
++ ssize_t ret;
++
++ if (len < MC_HEADER_SIZE) {
++ printk(KERN_ERR "microcode: not enough data\n");
++ return -EINVAL;
++ }
++
++ mutex_lock(&microcode_mutex);
++
++ ret = do_microcode_update(buf, len);
++ if (!ret)
++ ret = (ssize_t)len;
++
++ mutex_unlock(&microcode_mutex);
++
++ return ret;
++}
++
++static struct file_operations microcode_fops = {
++ .owner = THIS_MODULE,
++ .write = microcode_write,
++ .open = microcode_open,
++};
++
++static struct miscdevice microcode_dev = {
++ .minor = MICROCODE_MINOR,
++ .name = "microcode",
++ .fops = &microcode_fops,
++};
++
++static int __init microcode_init (void)
++{
++ int error;
++
++ error = misc_register(&microcode_dev);
++ if (error) {
++ printk(KERN_ERR
++ "microcode: can't misc_register on minor=%d\n",
++ MICROCODE_MINOR);
++ return error;
++ }
++
++ printk(KERN_INFO
++ "IA-32 Microcode Update Driver: v" MICROCODE_VERSION " <tigran@veritas.com>\n");
++ return 0;
++}
++
++static void __exit microcode_exit (void)
++{
++ misc_deregister(&microcode_dev);
++}
++
++module_init(microcode_init)
++module_exit(microcode_exit)
++MODULE_ALIAS_MISCDEV(MICROCODE_MINOR);
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/mpparse-xen.c linux-2.6.18-xen-3.2.0/arch/i386/kernel/mpparse-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/mpparse-xen.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/mpparse-xen.c 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,1185 @@
++/*
++ * Intel Multiprocessor Specification 1.1 and 1.4
++ * compliant MP-table parsing routines.
++ *
++ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
++ * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
++ *
++ * Fixes
++ * Erich Boleyn : MP v1.4 and additional changes.
++ * Alan Cox : Added EBDA scanning
++ * Ingo Molnar : various cleanups and rewrites
++ * Maciej W. Rozycki: Bits for default MP configurations
++ * Paul Diefenbaugh: Added full ACPI support
++ */
++
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <linux/acpi.h>
++#include <linux/delay.h>
++#include <linux/bootmem.h>
++#include <linux/smp_lock.h>
++#include <linux/kernel_stat.h>
++#include <linux/mc146818rtc.h>
++#include <linux/bitops.h>
++
++#include <asm/smp.h>
++#include <asm/acpi.h>
++#include <asm/mtrr.h>
++#include <asm/mpspec.h>
++#include <asm/io_apic.h>
++
++#include <mach_apic.h>
++#include <mach_mpparse.h>
++#include <bios_ebda.h>
++
++/* Have we found an MP table */
++int smp_found_config;
++unsigned int __initdata maxcpus = NR_CPUS;
++
++/*
++ * Various Linux-internal data structures created from the
++ * MP-table.
++ */
++int apic_version [MAX_APICS];
++int mp_bus_id_to_type [MAX_MP_BUSSES];
++int mp_bus_id_to_node [MAX_MP_BUSSES];
++int mp_bus_id_to_local [MAX_MP_BUSSES];
++int quad_local_to_mp_bus_id [NR_CPUS/4][4];
++int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
++static int mp_current_pci_id;
++
++/* I/O APIC entries */
++struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
++
++/* # of MP IRQ source entries */
++struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
++
++/* MP IRQ source entries */
++int mp_irq_entries;
++
++int nr_ioapics;
++
++int pic_mode;
++unsigned long mp_lapic_addr;
++
++unsigned int def_to_bigsmp = 0;
++
++/* Processor that is doing the boot up */
++unsigned int boot_cpu_physical_apicid = -1U;
++/* Internal processor count */
++static unsigned int __devinitdata num_processors;
++
++/* Bitmask of physically existing CPUs */
++physid_mask_t phys_cpu_present_map;
++
++u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
++
++/*
++ * Intel MP BIOS table parsing routines:
++ */
++
++
++/*
++ * Checksum an MP configuration block.
++ */
++
++static int __init mpf_checksum(unsigned char *mp, int len)
++{
++ int sum = 0;
++
++ while (len--)
++ sum += *mp++;
++
++ return sum & 0xFF;
++}
++
++/*
++ * Have to match translation table entries to main table entries by counter
++ * hence the mpc_record variable .... can't see a less disgusting way of
++ * doing this ....
++ */
++
++static int mpc_record;
++static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __initdata;
++
++#ifndef CONFIG_XEN
++static void __devinit MP_processor_info (struct mpc_config_processor *m)
++{
++ int ver, apicid;
++ physid_mask_t phys_cpu;
++
++ if (!(m->mpc_cpuflag & CPU_ENABLED))
++ return;
++
++ apicid = mpc_apic_id(m, translation_table[mpc_record]);
++
++ if (m->mpc_featureflag&(1<<0))
++ Dprintk(" Floating point unit present.\n");
++ if (m->mpc_featureflag&(1<<7))
++ Dprintk(" Machine Exception supported.\n");
++ if (m->mpc_featureflag&(1<<8))
++ Dprintk(" 64 bit compare & exchange supported.\n");
++ if (m->mpc_featureflag&(1<<9))
++ Dprintk(" Internal APIC present.\n");
++ if (m->mpc_featureflag&(1<<11))
++ Dprintk(" SEP present.\n");
++ if (m->mpc_featureflag&(1<<12))
++ Dprintk(" MTRR present.\n");
++ if (m->mpc_featureflag&(1<<13))
++ Dprintk(" PGE present.\n");
++ if (m->mpc_featureflag&(1<<14))
++ Dprintk(" MCA present.\n");
++ if (m->mpc_featureflag&(1<<15))
++ Dprintk(" CMOV present.\n");
++ if (m->mpc_featureflag&(1<<16))
++ Dprintk(" PAT present.\n");
++ if (m->mpc_featureflag&(1<<17))
++ Dprintk(" PSE present.\n");
++ if (m->mpc_featureflag&(1<<18))
++ Dprintk(" PSN present.\n");
++ if (m->mpc_featureflag&(1<<19))
++ Dprintk(" Cache Line Flush Instruction present.\n");
++ /* 20 Reserved */
++ if (m->mpc_featureflag&(1<<21))
++ Dprintk(" Debug Trace and EMON Store present.\n");
++ if (m->mpc_featureflag&(1<<22))
++ Dprintk(" ACPI Thermal Throttle Registers present.\n");
++ if (m->mpc_featureflag&(1<<23))
++ Dprintk(" MMX present.\n");
++ if (m->mpc_featureflag&(1<<24))
++ Dprintk(" FXSR present.\n");
++ if (m->mpc_featureflag&(1<<25))
++ Dprintk(" XMM present.\n");
++ if (m->mpc_featureflag&(1<<26))
++ Dprintk(" Willamette New Instructions present.\n");
++ if (m->mpc_featureflag&(1<<27))
++ Dprintk(" Self Snoop present.\n");
++ if (m->mpc_featureflag&(1<<28))
++ Dprintk(" HT present.\n");
++ if (m->mpc_featureflag&(1<<29))
++ Dprintk(" Thermal Monitor present.\n");
++ /* 30, 31 Reserved */
++
++
++ if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
++ Dprintk(" Bootup CPU\n");
++ boot_cpu_physical_apicid = m->mpc_apicid;
++ }
++
++ ver = m->mpc_apicver;
++
++ /*
++ * Validate version
++ */
++ if (ver == 0x0) {
++ printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! "
++ "fixing up to 0x10. (tell your hw vendor)\n",
++ m->mpc_apicid);
++ ver = 0x10;
++ }
++ apic_version[m->mpc_apicid] = ver;
++
++ phys_cpu = apicid_to_cpu_present(apicid);
++ physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu);
++
++ if (num_processors >= NR_CPUS) {
++ printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
++ " Processor ignored.\n", NR_CPUS);
++ return;
++ }
++
++ if (num_processors >= maxcpus) {
++ printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
++ " Processor ignored.\n", maxcpus);
++ return;
++ }
++
++ cpu_set(num_processors, cpu_possible_map);
++ num_processors++;
++
++ /*
++ * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
++ * but we need to work out other dependencies like SMP_SUSPEND etc.
++ * before this can be done without some confusion.
++ * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
++ * - Ashok Raj <ashok.raj@intel.com>
++ */
++ if (num_processors > 8) {
++ switch (boot_cpu_data.x86_vendor) {
++ case X86_VENDOR_INTEL:
++ if (!APIC_XAPIC(ver)) {
++ def_to_bigsmp = 0;
++ break;
++ }
++ /* If P4 and above fall through */
++ case X86_VENDOR_AMD:
++ def_to_bigsmp = 1;
++ }
++ }
++ bios_cpu_apicid[num_processors - 1] = m->mpc_apicid;
++}
++#else
++void __init MP_processor_info (struct mpc_config_processor *m)
++{
++ num_processors++;
++}
++#endif /* CONFIG_XEN */
++
++static void __init MP_bus_info (struct mpc_config_bus *m)
++{
++ char str[7];
++
++ memcpy(str, m->mpc_bustype, 6);
++ str[6] = 0;
++
++ mpc_oem_bus_info(m, str, translation_table[mpc_record]);
++
++ if (m->mpc_busid >= MAX_MP_BUSSES) {
++ printk(KERN_WARNING "MP table busid value (%d) for bustype %s "
++ " is too large, max. supported is %d\n",
++ m->mpc_busid, str, MAX_MP_BUSSES - 1);
++ return;
++ }
++
++ if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
++ } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) {
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
++ } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) {
++ mpc_oem_pci_bus(m, translation_table[mpc_record]);
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
++ mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
++ mp_current_pci_id++;
++ } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
++ } else if (strncmp(str, BUSTYPE_NEC98, sizeof(BUSTYPE_NEC98)-1) == 0) {
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_NEC98;
++ } else {
++ printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
++ }
++}
++
++static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
++{
++ if (!(m->mpc_flags & MPC_APIC_USABLE))
++ return;
++
++ printk(KERN_INFO "I/O APIC #%d Version %d at 0x%lX.\n",
++ m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
++ if (nr_ioapics >= MAX_IO_APICS) {
++ printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n",
++ MAX_IO_APICS, nr_ioapics);
++ panic("Recompile kernel with bigger MAX_IO_APICS!.\n");
++ }
++ if (!m->mpc_apicaddr) {
++ printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
++ " found in MP table, skipping!\n");
++ return;
++ }
++ mp_ioapics[nr_ioapics] = *m;
++ nr_ioapics++;
++}
++
++static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
++{
++ mp_irqs [mp_irq_entries] = *m;
++ Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
++ " IRQ %02x, APIC ID %x, APIC INT %02x\n",
++ m->mpc_irqtype, m->mpc_irqflag & 3,
++ (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
++ m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
++ if (++mp_irq_entries == MAX_IRQ_SOURCES)
++ panic("Max # of irq sources exceeded!!\n");
++}
++
++static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
++{
++ Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
++ " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
++ m->mpc_irqtype, m->mpc_irqflag & 3,
++ (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
++ m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
++ /*
++ * Well it seems all SMP boards in existence
++ * use ExtINT/LVT1 == LINT0 and
++ * NMI/LVT2 == LINT1 - the following check
++ * will show us if this assumption is false.
++ * Until then we do not have to add baggage.
++ */
++ if ((m->mpc_irqtype == mp_ExtINT) &&
++ (m->mpc_destapiclint != 0))
++ BUG();
++ if ((m->mpc_irqtype == mp_NMI) &&
++ (m->mpc_destapiclint != 1))
++ BUG();
++}
++
++#ifdef CONFIG_X86_NUMAQ
++static void __init MP_translation_info (struct mpc_config_translation *m)
++{
++ printk(KERN_INFO "Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local);
++
++ if (mpc_record >= MAX_MPC_ENTRY)
++ printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
++ else
++ translation_table[mpc_record] = m; /* stash this for later */
++ if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
++ node_set_online(m->trans_quad);
++}
++
++/*
++ * Read/parse the MPC oem tables
++ */
++
++static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, \
++ unsigned short oemsize)
++{
++ int count = sizeof (*oemtable); /* the header size */
++ unsigned char *oemptr = ((unsigned char *)oemtable)+count;
++
++ mpc_record = 0;
++ printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", oemtable);
++ if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4))
++ {
++ printk(KERN_WARNING "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
++ oemtable->oem_signature[0],
++ oemtable->oem_signature[1],
++ oemtable->oem_signature[2],
++ oemtable->oem_signature[3]);
++ return;
++ }
++ if (mpf_checksum((unsigned char *)oemtable,oemtable->oem_length))
++ {
++ printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
++ return;
++ }
++ while (count < oemtable->oem_length) {
++ switch (*oemptr) {
++ case MP_TRANSLATION:
++ {
++ struct mpc_config_translation *m=
++ (struct mpc_config_translation *)oemptr;
++ MP_translation_info(m);
++ oemptr += sizeof(*m);
++ count += sizeof(*m);
++ ++mpc_record;
++ break;
++ }
++ default:
++ {
++ printk(KERN_WARNING "Unrecognised OEM table entry type! - %d\n", (int) *oemptr);
++ return;
++ }
++ }
++ }
++}
++
++static inline void mps_oem_check(struct mp_config_table *mpc, char *oem,
++ char *productid)
++{
++ if (strncmp(oem, "IBM NUMA", 8))
++ printk("Warning! May not be a NUMA-Q system!\n");
++ if (mpc->mpc_oemptr)
++ smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr,
++ mpc->mpc_oemsize);
++}
++#endif /* CONFIG_X86_NUMAQ */
++
++/*
++ * Read/parse the MPC
++ */
++
++static int __init smp_read_mpc(struct mp_config_table *mpc)
++{
++ char str[16];
++ char oem[10];
++ int count=sizeof(*mpc);
++ unsigned char *mpt=((unsigned char *)mpc)+count;
++
++ if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
++ printk(KERN_ERR "SMP mptable: bad signature [0x%x]!\n",
++ *(u32 *)mpc->mpc_signature);
++ return 0;
++ }
++ if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
++ printk(KERN_ERR "SMP mptable: checksum error!\n");
++ return 0;
++ }
++ if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
++ printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
++ mpc->mpc_spec);
++ return 0;
++ }
++ if (!mpc->mpc_lapic) {
++ printk(KERN_ERR "SMP mptable: null local APIC address!\n");
++ return 0;
++ }
++ memcpy(oem,mpc->mpc_oem,8);
++ oem[8]=0;
++ printk(KERN_INFO "OEM ID: %s ",oem);
++
++ memcpy(str,mpc->mpc_productid,12);
++ str[12]=0;
++ printk("Product ID: %s ",str);
++
++ mps_oem_check(mpc, oem, str);
++
++ printk("APIC at: 0x%lX\n",mpc->mpc_lapic);
++
++ /*
++ * Save the local APIC address (it might be non-default) -- but only
++ * if we're not using ACPI.
++ */
++ if (!acpi_lapic)
++ mp_lapic_addr = mpc->mpc_lapic;
++
++ /*
++ * Now process the configuration blocks.
++ */
++ mpc_record = 0;
++ while (count < mpc->mpc_length) {
++ switch(*mpt) {
++ case MP_PROCESSOR:
++ {
++ struct mpc_config_processor *m=
++ (struct mpc_config_processor *)mpt;
++ /* ACPI may have already provided this data */
++ if (!acpi_lapic)
++ MP_processor_info(m);
++ mpt += sizeof(*m);
++ count += sizeof(*m);
++ break;
++ }
++ case MP_BUS:
++ {
++ struct mpc_config_bus *m=
++ (struct mpc_config_bus *)mpt;
++ MP_bus_info(m);
++ mpt += sizeof(*m);
++ count += sizeof(*m);
++ break;
++ }
++ case MP_IOAPIC:
++ {
++ struct mpc_config_ioapic *m=
++ (struct mpc_config_ioapic *)mpt;
++ MP_ioapic_info(m);
++ mpt+=sizeof(*m);
++ count+=sizeof(*m);
++ break;
++ }
++ case MP_INTSRC:
++ {
++ struct mpc_config_intsrc *m=
++ (struct mpc_config_intsrc *)mpt;
++
++ MP_intsrc_info(m);
++ mpt+=sizeof(*m);
++ count+=sizeof(*m);
++ break;
++ }
++ case MP_LINTSRC:
++ {
++ struct mpc_config_lintsrc *m=
++ (struct mpc_config_lintsrc *)mpt;
++ MP_lintsrc_info(m);
++ mpt+=sizeof(*m);
++ count+=sizeof(*m);
++ break;
++ }
++ default:
++ {
++ count = mpc->mpc_length;
++ break;
++ }
++ }
++ ++mpc_record;
++ }
++ clustered_apic_check();
++ if (!num_processors)
++ printk(KERN_ERR "SMP mptable: no processors registered!\n");
++ return num_processors;
++}
++
++static int __init ELCR_trigger(unsigned int irq)
++{
++ unsigned int port;
++
++ port = 0x4d0 + (irq >> 3);
++ return (inb(port) >> (irq & 7)) & 1;
++}
++
++static void __init construct_default_ioirq_mptable(int mpc_default_type)
++{
++ struct mpc_config_intsrc intsrc;
++ int i;
++ int ELCR_fallback = 0;
++
++ intsrc.mpc_type = MP_INTSRC;
++ intsrc.mpc_irqflag = 0; /* conforming */
++ intsrc.mpc_srcbus = 0;
++ intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
++
++ intsrc.mpc_irqtype = mp_INT;
++
++ /*
++ * If true, we have an ISA/PCI system with no IRQ entries
++ * in the MP table. To prevent the PCI interrupts from being set up
++ * incorrectly, we try to use the ELCR. The sanity check to see if
++ * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
++ * never be level sensitive, so we simply see if the ELCR agrees.
++ * If it does, we assume it's valid.
++ */
++ if (mpc_default_type == 5) {
++ printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
++
++ if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
++ printk(KERN_WARNING "ELCR contains invalid data... not using ELCR\n");
++ else {
++ printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
++ ELCR_fallback = 1;
++ }
++ }
++
++ for (i = 0; i < 16; i++) {
++ switch (mpc_default_type) {
++ case 2:
++ if (i == 0 || i == 13)
++ continue; /* IRQ0 & IRQ13 not connected */
++ /* fall through */
++ default:
++ if (i == 2)
++ continue; /* IRQ2 is never connected */
++ }
++
++ if (ELCR_fallback) {
++ /*
++ * If the ELCR indicates a level-sensitive interrupt, we
++ * copy that information over to the MP table in the
++ * irqflag field (level sensitive, active high polarity).
++ */
++ if (ELCR_trigger(i))
++ intsrc.mpc_irqflag = 13;
++ else
++ intsrc.mpc_irqflag = 0;
++ }
++
++ intsrc.mpc_srcbusirq = i;
++ intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
++ MP_intsrc_info(&intsrc);
++ }
++
++ intsrc.mpc_irqtype = mp_ExtINT;
++ intsrc.mpc_srcbusirq = 0;
++ intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */
++ MP_intsrc_info(&intsrc);
++}
++
++static inline void __init construct_default_ISA_mptable(int mpc_default_type)
++{
++ struct mpc_config_processor processor;
++ struct mpc_config_bus bus;
++ struct mpc_config_ioapic ioapic;
++ struct mpc_config_lintsrc lintsrc;
++ int linttypes[2] = { mp_ExtINT, mp_NMI };
++ int i;
++
++ /*
++ * local APIC has default address
++ */
++ mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
++
++ /*
++ * 2 CPUs, numbered 0 & 1.
++ */
++ processor.mpc_type = MP_PROCESSOR;
++ /* Either an integrated APIC or a discrete 82489DX. */
++ processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
++ processor.mpc_cpuflag = CPU_ENABLED;
++ processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
++ (boot_cpu_data.x86_model << 4) |
++ boot_cpu_data.x86_mask;
++ processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
++ processor.mpc_reserved[0] = 0;
++ processor.mpc_reserved[1] = 0;
++ for (i = 0; i < 2; i++) {
++ processor.mpc_apicid = i;
++ MP_processor_info(&processor);
++ }
++
++ bus.mpc_type = MP_BUS;
++ bus.mpc_busid = 0;
++ switch (mpc_default_type) {
++ default:
++ printk("???\n");
++ printk(KERN_ERR "Unknown standard configuration %d\n",
++ mpc_default_type);
++ /* fall through */
++ case 1:
++ case 5:
++ memcpy(bus.mpc_bustype, "ISA ", 6);
++ break;
++ case 2:
++ case 6:
++ case 3:
++ memcpy(bus.mpc_bustype, "EISA ", 6);
++ break;
++ case 4:
++ case 7:
++ memcpy(bus.mpc_bustype, "MCA ", 6);
++ }
++ MP_bus_info(&bus);
++ if (mpc_default_type > 4) {
++ bus.mpc_busid = 1;
++ memcpy(bus.mpc_bustype, "PCI ", 6);
++ MP_bus_info(&bus);
++ }
++
++ ioapic.mpc_type = MP_IOAPIC;
++ ioapic.mpc_apicid = 2;
++ ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
++ ioapic.mpc_flags = MPC_APIC_USABLE;
++ ioapic.mpc_apicaddr = 0xFEC00000;
++ MP_ioapic_info(&ioapic);
++
++ /*
++ * We set up most of the low 16 IO-APIC pins according to MPS rules.
++ */
++ construct_default_ioirq_mptable(mpc_default_type);
++
++ lintsrc.mpc_type = MP_LINTSRC;
++ lintsrc.mpc_irqflag = 0; /* conforming */
++ lintsrc.mpc_srcbusid = 0;
++ lintsrc.mpc_srcbusirq = 0;
++ lintsrc.mpc_destapic = MP_APIC_ALL;
++ for (i = 0; i < 2; i++) {
++ lintsrc.mpc_irqtype = linttypes[i];
++ lintsrc.mpc_destapiclint = i;
++ MP_lintsrc_info(&lintsrc);
++ }
++}
++
++static struct intel_mp_floating *mpf_found;
++
++/*
++ * Scan the memory blocks for an SMP configuration block.
++ */
++void __init get_smp_config (void)
++{
++ struct intel_mp_floating *mpf = mpf_found;
++
++ /*
++ * ACPI supports both logical (e.g. Hyper-Threading) and physical
++	 * processors, whereas MPS supports only physical processors.
++ */
++ if (acpi_lapic && acpi_ioapic) {
++ printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
++ return;
++ }
++ else if (acpi_lapic)
++ printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
++
++ printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
++ if (mpf->mpf_feature2 & (1<<7)) {
++ printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
++ pic_mode = 1;
++ } else {
++ printk(KERN_INFO " Virtual Wire compatibility mode.\n");
++ pic_mode = 0;
++ }
++
++ /*
++ * Now see if we need to read further.
++ */
++ if (mpf->mpf_feature1 != 0) {
++
++ printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
++ construct_default_ISA_mptable(mpf->mpf_feature1);
++
++ } else if (mpf->mpf_physptr) {
++
++ /*
++ * Read the physical hardware table. Anything here will
++ * override the defaults.
++ */
++ if (!smp_read_mpc(isa_bus_to_virt(mpf->mpf_physptr))) {
++ smp_found_config = 0;
++ printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
++ printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
++ return;
++ }
++ /*
++ * If there are no explicit MP IRQ entries, then we are
++ * broken. We set up most of the low 16 IO-APIC pins to
++ * ISA defaults and hope it will work.
++ */
++ if (!mp_irq_entries) {
++ struct mpc_config_bus bus;
++
++ printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
++
++ bus.mpc_type = MP_BUS;
++ bus.mpc_busid = 0;
++ memcpy(bus.mpc_bustype, "ISA ", 6);
++ MP_bus_info(&bus);
++
++ construct_default_ioirq_mptable(0);
++ }
++
++ } else
++ BUG();
++
++ printk(KERN_INFO "Processors: %d\n", num_processors);
++ /*
++ * Only use the first configuration found.
++ */
++}
++
++static int __init smp_scan_config (unsigned long base, unsigned long length)
++{
++ unsigned long *bp = isa_bus_to_virt(base);
++ struct intel_mp_floating *mpf;
++
++ Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
++ if (sizeof(*mpf) != 16)
++ printk("Error: MPF size\n");
++
++ while (length > 0) {
++ mpf = (struct intel_mp_floating *)bp;
++ if ((*bp == SMP_MAGIC_IDENT) &&
++ (mpf->mpf_length == 1) &&
++ !mpf_checksum((unsigned char *)bp, 16) &&
++ ((mpf->mpf_specification == 1)
++ || (mpf->mpf_specification == 4)) ) {
++
++ smp_found_config = 1;
++#ifndef CONFIG_XEN
++ printk(KERN_INFO "found SMP MP-table at %08lx\n",
++ virt_to_phys(mpf));
++ reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE);
++ if (mpf->mpf_physptr) {
++			/*
++			 * We cannot access the MPC table to compute its
++			 * size yet, as only a few megabytes from the
++			 * bottom of memory are mapped at this point.
++			 * The PC-9800 places its MPC table at the very
++			 * end of physical memory, so blindly reserving
++			 * PAGE_SIZE from mpf->mpf_physptr would trigger
++			 * BUG() in reserve_bootmem.
++			 */
++ unsigned long size = PAGE_SIZE;
++ unsigned long end = max_low_pfn * PAGE_SIZE;
++ if (mpf->mpf_physptr + size > end)
++ size = end - mpf->mpf_physptr;
++ reserve_bootmem(mpf->mpf_physptr, size);
++ }
++#else
++ printk(KERN_INFO "found SMP MP-table at %08lx\n",
++ ((unsigned long)bp - (unsigned long)isa_bus_to_virt(base)) + base);
++#endif
++
++ mpf_found = mpf;
++ return 1;
++ }
++ bp += 4;
++ length -= 16;
++ }
++ return 0;
++}
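
smp_scan_config() walks the candidate region in 16-byte steps looking for the MP Floating Pointer Structure: the "_MP_" signature (SMP_MAGIC_IDENT), a length of exactly one 16-byte paragraph, a valid checksum, and an MP specification revision of 1.1 or 1.4. The checksum rule it relies on is simply that all 16 bytes, including the checksum field itself, sum to zero modulo 256; a standalone restatement of that invariant (mpf_checksum() itself is defined elsewhere in this file, so the name below is only illustrative):

/* Illustration of the MP-spec rule smp_scan_config() depends on:
 * the 16 bytes of the floating pointer structure, checksum field
 * included, must sum to zero modulo 256. */
static int mpf_checksum_example(const unsigned char *p, int len)
{
	unsigned char sum = 0;

	while (len--)
		sum += *p++;

	return sum;	/* 0 means the structure is valid */
}
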
++
++void __init find_smp_config (void)
++{
++#ifndef CONFIG_XEN
++ unsigned int address;
++#endif
++
++ /*
++ * FIXME: Linux assumes you have 640K of base ram..
++ * this continues the error...
++ *
++ * 1) Scan the bottom 1K for a signature
++ * 2) Scan the top 1K of base RAM
++ * 3) Scan the 64K of bios
++ */
++ if (smp_scan_config(0x0,0x400) ||
++ smp_scan_config(639*0x400,0x400) ||
++ smp_scan_config(0xF0000,0x10000))
++ return;
++ /*
++ * If it is an SMP machine we should know now, unless the
++ * configuration is in an EISA/MCA bus machine with an
++ * extended bios data area.
++ *
++	 * There is a real-mode segmented pointer to the 4K EBDA
++	 * area at 0x40E; calculate its address and scan it here.
++ *
++ * NOTE! There are Linux loaders that will corrupt the EBDA
++ * area, and as such this kind of SMP config may be less
++ * trustworthy, simply because the SMP table may have been
++ * stomped on during early boot. These loaders are buggy and
++ * should be fixed.
++ *
++ * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
++ */
++
++#ifndef CONFIG_XEN
++ address = get_bios_ebda();
++ if (address)
++ smp_scan_config(address, 0x400);
++#endif
++}
++
++int es7000_plat;
++
++/* --------------------------------------------------------------------------
++ ACPI-based MP Configuration
++ -------------------------------------------------------------------------- */
++
++#ifdef CONFIG_ACPI
++
++void __init mp_register_lapic_address (
++ u64 address)
++{
++#ifndef CONFIG_XEN
++ mp_lapic_addr = (unsigned long) address;
++
++ set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
++
++ if (boot_cpu_physical_apicid == -1U)
++ boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
++
++ Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
++#endif
++}
++
++
++void __devinit mp_register_lapic (
++ u8 id,
++ u8 enabled)
++{
++ struct mpc_config_processor processor;
++ int boot_cpu = 0;
++
++ if (MAX_APICS - id <= 0) {
++ printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
++ id, MAX_APICS);
++ return;
++ }
++
++ if (id == boot_cpu_physical_apicid)
++ boot_cpu = 1;
++
++#ifndef CONFIG_XEN
++ processor.mpc_type = MP_PROCESSOR;
++ processor.mpc_apicid = id;
++ processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
++ processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
++ processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
++ processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
++ (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
++ processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
++ processor.mpc_reserved[0] = 0;
++ processor.mpc_reserved[1] = 0;
++#endif
++
++ MP_processor_info(&processor);
++}
++
++#ifdef CONFIG_X86_IO_APIC
++
++#define MP_ISA_BUS 0
++#define MP_MAX_IOAPIC_PIN 127
++
++static struct mp_ioapic_routing {
++ int apic_id;
++ int gsi_base;
++ int gsi_end;
++ u32 pin_programmed[4];
++} mp_ioapic_routing[MAX_IO_APICS];
++
++
++static int mp_find_ioapic (
++ int gsi)
++{
++ int i = 0;
++
++ /* Find the IOAPIC that manages this GSI. */
++ for (i = 0; i < nr_ioapics; i++) {
++ if ((gsi >= mp_ioapic_routing[i].gsi_base)
++ && (gsi <= mp_ioapic_routing[i].gsi_end))
++ return i;
++ }
++
++ printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
++
++ return -1;
++}
++
++
++void __init mp_register_ioapic (
++ u8 id,
++ u32 address,
++ u32 gsi_base)
++{
++ int idx = 0;
++ int tmpid;
++
++ if (nr_ioapics >= MAX_IO_APICS) {
++ printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
++ "(found %d)\n", MAX_IO_APICS, nr_ioapics);
++ panic("Recompile kernel with bigger MAX_IO_APICS!\n");
++ }
++ if (!address) {
++ printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
++ " found in MADT table, skipping!\n");
++ return;
++ }
++
++ idx = nr_ioapics++;
++
++ mp_ioapics[idx].mpc_type = MP_IOAPIC;
++ mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
++ mp_ioapics[idx].mpc_apicaddr = address;
++
++#ifndef CONFIG_XEN
++ set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
++#endif
++ if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
++ && !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
++ tmpid = io_apic_get_unique_id(idx, id);
++ else
++ tmpid = id;
++ if (tmpid == -1) {
++ nr_ioapics--;
++ return;
++ }
++ mp_ioapics[idx].mpc_apicid = tmpid;
++ mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
++
++ /*
++ * Build basic GSI lookup table to facilitate gsi->io_apic lookups
++ * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
++ */
++ mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
++ mp_ioapic_routing[idx].gsi_base = gsi_base;
++ mp_ioapic_routing[idx].gsi_end = gsi_base +
++ io_apic_get_redir_entries(idx);
++
++ printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
++ "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
++ mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
++ mp_ioapic_routing[idx].gsi_base,
++ mp_ioapic_routing[idx].gsi_end);
++
++ return;
++}
++
++
++void __init mp_override_legacy_irq (
++ u8 bus_irq,
++ u8 polarity,
++ u8 trigger,
++ u32 gsi)
++{
++ struct mpc_config_intsrc intsrc;
++ int ioapic = -1;
++ int pin = -1;
++
++ /*
++ * Convert 'gsi' to 'ioapic.pin'.
++ */
++ ioapic = mp_find_ioapic(gsi);
++ if (ioapic < 0)
++ return;
++ pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
++
++ /*
++ * TBD: This check is for faulty timer entries, where the override
++ * erroneously sets the trigger to level, resulting in a HUGE
++ * increase of timer interrupts!
++ */
++ if ((bus_irq == 0) && (trigger == 3))
++ trigger = 1;
++
++ intsrc.mpc_type = MP_INTSRC;
++ intsrc.mpc_irqtype = mp_INT;
++ intsrc.mpc_irqflag = (trigger << 2) | polarity;
++ intsrc.mpc_srcbus = MP_ISA_BUS;
++ intsrc.mpc_srcbusirq = bus_irq; /* IRQ */
++ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */
++ intsrc.mpc_dstirq = pin; /* INTIN# */
++
++ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
++ intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
++ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
++ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
++
++ mp_irqs[mp_irq_entries] = intsrc;
++ if (++mp_irq_entries == MAX_IRQ_SOURCES)
++ panic("Max # of irq sources exceeded!\n");
++
++ return;
++}
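
mp_override_legacy_irq() packs the MPS interrupt flag word the same way the rest of this file does: polarity in bits 0-1 and trigger mode in bits 2-3, with 0 meaning "conforms to the bus default", 1 meaning active-high/edge and 3 meaning active-low/level. That is also why the level-sensitive, active-high default used earlier for ELCR-detected interrupts is the constant 13, i.e. (3 << 2) | 1. A small decoding sketch (helper names assumed, not from this file):

#include <stdint.h>

/* MPS irqflag word: bits 0-1 polarity (1 = active high, 3 = active
 * low), bits 2-3 trigger (1 = edge, 3 = level), 0 = bus default. */
static unsigned int irqflag_polarity(uint16_t irqflag) { return irqflag & 3; }
static unsigned int irqflag_trigger(uint16_t irqflag)  { return (irqflag >> 2) & 3; }
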
++
++void __init mp_config_acpi_legacy_irqs (void)
++{
++ struct mpc_config_intsrc intsrc;
++ int i = 0;
++ int ioapic = -1;
++
++ /*
++ * Fabricate the legacy ISA bus (bus #31).
++ */
++ mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
++ Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
++
++ /*
++ * Older generations of ES7000 have no legacy identity mappings
++ */
++ if (es7000_plat == 1)
++ return;
++
++ /*
++ * Locate the IOAPIC that manages the ISA IRQs (0-15).
++ */
++ ioapic = mp_find_ioapic(0);
++ if (ioapic < 0)
++ return;
++
++ intsrc.mpc_type = MP_INTSRC;
++ intsrc.mpc_irqflag = 0; /* Conforming */
++ intsrc.mpc_srcbus = MP_ISA_BUS;
++ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
++
++	/*
++	 * Use the default configuration for IRQs 0-15, unless it is
++	 * overridden by (MADT) interrupt source override entries.
++	 */
++ for (i = 0; i < 16; i++) {
++ int idx;
++
++ for (idx = 0; idx < mp_irq_entries; idx++) {
++ struct mpc_config_intsrc *irq = mp_irqs + idx;
++
++ /* Do we already have a mapping for this ISA IRQ? */
++ if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
++ break;
++
++			/* Do we already have a mapping for this IOAPIC pin? */
++ if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
++ (irq->mpc_dstirq == i))
++ break;
++ }
++
++ if (idx != mp_irq_entries) {
++ printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
++ continue; /* IRQ already used */
++ }
++
++ intsrc.mpc_irqtype = mp_INT;
++ intsrc.mpc_srcbusirq = i; /* Identity mapped */
++ intsrc.mpc_dstirq = i;
++
++ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
++ "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
++ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
++ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
++ intsrc.mpc_dstirq);
++
++ mp_irqs[mp_irq_entries] = intsrc;
++ if (++mp_irq_entries == MAX_IRQ_SOURCES)
++ panic("Max # of irq sources exceeded!\n");
++ }
++}
++
++#define MAX_GSI_NUM 4096
++
++int mp_register_gsi (u32 gsi, int triggering, int polarity)
++{
++ int ioapic = -1;
++ int ioapic_pin = 0;
++ int idx, bit = 0;
++ static int pci_irq = 16;
++	/*
++	 * Mapping between Global System Interrupts, which
++	 * represent all possible interrupts, and IRQs
++	 * assigned to actual devices.
++	 */
++ static int gsi_to_irq[MAX_GSI_NUM];
++
++ /* Don't set up the ACPI SCI because it's already set up */
++ if (acpi_fadt.sci_int == gsi)
++ return gsi;
++
++ ioapic = mp_find_ioapic(gsi);
++ if (ioapic < 0) {
++ printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
++ return gsi;
++ }
++
++ ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
++
++ if (ioapic_renumber_irq)
++ gsi = ioapic_renumber_irq(ioapic, gsi);
++
++ /*
++ * Avoid pin reprogramming. PRTs typically include entries
++ * with redundant pin->gsi mappings (but unique PCI devices);
++ * we only program the IOAPIC on the first.
++ */
++ bit = ioapic_pin % 32;
++ idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
++ if (idx > 3) {
++ printk(KERN_ERR "Invalid reference to IOAPIC pin "
++ "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
++ ioapic_pin);
++ return gsi;
++ }
++ if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
++ Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
++ mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
++ return gsi_to_irq[gsi];
++ }
++
++ mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
++
++ if (triggering == ACPI_LEVEL_SENSITIVE) {
++ /*
++ * For PCI devices assign IRQs in order, avoiding gaps
++ * due to unused I/O APIC pins.
++ */
++ int irq = gsi;
++ if (gsi < MAX_GSI_NUM) {
++ /*
++ * Retain the VIA chipset work-around (gsi > 15), but
++			 * avoid a problem where the 8254 timer (IRQ0) is set up
++ * via an override (so it's not on pin 0 of the ioapic),
++ * and at the same time, the pin 0 interrupt is a PCI
++ * type. The gsi > 15 test could cause these two pins
++ * to be shared as IRQ0, and they are not shareable.
++ * So test for this condition, and if necessary, avoid
++ * the pin collision.
++ */
++ if (gsi > 15 || (gsi == 0 && !timer_uses_ioapic_pin_0))
++ gsi = pci_irq++;
++ /*
++ * Don't assign IRQ used by ACPI SCI
++ */
++ if (gsi == acpi_fadt.sci_int)
++ gsi = pci_irq++;
++ gsi_to_irq[irq] = gsi;
++ } else {
++ printk(KERN_ERR "GSI %u is too high\n", gsi);
++ return gsi;
++ }
++ }
++
++ io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
++ triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
++ polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
++ return gsi;
++}
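
mp_register_gsi() records which IOAPIC pins have already been programmed in the four-word pin_programmed[] bitmap, so that the redundant pin->GSI entries typical of ACPI _PRT tables only program each pin once. The test-and-mark pattern it uses reduces to the following (standalone sketch with assumed names):

#include <stdint.h>

/* Up to 128 pins tracked in pin_programmed[4]; returns non-zero if
 * the pin had already been marked before this call. */
static int test_and_mark_pin(uint32_t pin_programmed[4], int pin)
{
	int idx = pin / 32;
	uint32_t bit = 1u << (pin % 32);
	int was_set = (pin_programmed[idx] & bit) != 0;

	pin_programmed[idx] |= bit;
	return was_set;
}
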
++
++#endif /* CONFIG_X86_IO_APIC */
++#endif /* CONFIG_ACPI */
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/pci-dma-xen.c linux-2.6.18-xen-3.2.0/arch/i386/kernel/pci-dma-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/pci-dma-xen.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/pci-dma-xen.c 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,382 @@
++/*
++ * Dynamic DMA mapping support.
++ *
++ * On i386 there is no hardware dynamic DMA address translation,
++ * so consistent alloc/free are merely page allocation/freeing.
++ * The rest of the dynamic DMA mapping interface is implemented
++ * in asm/pci.h.
++ */
++
++#include <linux/types.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/pci.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <asm/io.h>
++#include <xen/balloon.h>
++#include <xen/gnttab.h>
++#include <asm/swiotlb.h>
++#include <asm/tlbflush.h>
++#include <asm-i386/mach-xen/asm/swiotlb.h>
++#include <asm-i386/mach-xen/asm/gnttab_dma.h>
++#include <asm/bug.h>
++
++#ifdef __x86_64__
++#include <asm/proto.h>
++
++int iommu_merge __read_mostly = 0;
++EXPORT_SYMBOL(iommu_merge);
++
++dma_addr_t bad_dma_address __read_mostly;
++EXPORT_SYMBOL(bad_dma_address);
++
++/* This tells the BIO block layer to assume merging. Default to off
++ because we cannot guarantee merging later. */
++int iommu_bio_merge __read_mostly = 0;
++EXPORT_SYMBOL(iommu_bio_merge);
++
++int force_iommu __read_mostly= 0;
++
++__init int iommu_setup(char *p)
++{
++ return 1;
++}
++
++void __init pci_iommu_alloc(void)
++{
++#ifdef CONFIG_SWIOTLB
++ pci_swiotlb_init();
++#endif
++}
++
++static int __init pci_iommu_init(void)
++{
++ no_iommu_init();
++ return 0;
++}
++
++/* Must execute after PCI subsystem */
++fs_initcall(pci_iommu_init);
++#endif
++
++struct dma_coherent_mem {
++ void *virt_base;
++ u32 device_base;
++ int size;
++ int flags;
++ unsigned long *bitmap;
++};
++
++#define IOMMU_BUG_ON(test) \
++do { \
++ if (unlikely(test)) { \
++ printk(KERN_ALERT "Fatal DMA error! " \
++ "Please use 'swiotlb=force'\n"); \
++ BUG(); \
++ } \
++} while (0)
++
++int
++dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
++ enum dma_data_direction direction)
++{
++ int i, rc;
++
++ if (direction == DMA_NONE)
++ BUG();
++ WARN_ON(nents == 0 || sg[0].length == 0);
++
++ if (swiotlb) {
++ rc = swiotlb_map_sg(hwdev, sg, nents, direction);
++ } else {
++ for (i = 0; i < nents; i++ ) {
++ BUG_ON(!sg[i].page);
++ sg[i].dma_address =
++ gnttab_dma_map_page(sg[i].page) + sg[i].offset;
++ sg[i].dma_length = sg[i].length;
++ IOMMU_BUG_ON(address_needs_mapping(
++ hwdev, sg[i].dma_address));
++ IOMMU_BUG_ON(range_straddles_page_boundary(
++ page_to_pseudophys(sg[i].page) + sg[i].offset,
++ sg[i].length));
++ }
++ rc = nents;
++ }
++
++ flush_write_buffers();
++ return rc;
++}
++EXPORT_SYMBOL(dma_map_sg);
++
++void
++dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
++ enum dma_data_direction direction)
++{
++ int i;
++
++ BUG_ON(direction == DMA_NONE);
++ if (swiotlb)
++ swiotlb_unmap_sg(hwdev, sg, nents, direction);
++ else {
++ for (i = 0; i < nents; i++ )
++ gnttab_dma_unmap_page(sg[i].dma_address);
++ }
++}
++EXPORT_SYMBOL(dma_unmap_sg);
++
++#ifdef CONFIG_HIGHMEM
++dma_addr_t
++dma_map_page(struct device *dev, struct page *page, unsigned long offset,
++ size_t size, enum dma_data_direction direction)
++{
++ dma_addr_t dma_addr;
++
++ BUG_ON(direction == DMA_NONE);
++
++ if (swiotlb) {
++ dma_addr = swiotlb_map_page(
++ dev, page, offset, size, direction);
++ } else {
++ dma_addr = gnttab_dma_map_page(page) + offset;
++ IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
++ }
++
++ return dma_addr;
++}
++EXPORT_SYMBOL(dma_map_page);
++
++void
++dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
++ enum dma_data_direction direction)
++{
++ BUG_ON(direction == DMA_NONE);
++ if (swiotlb)
++ swiotlb_unmap_page(dev, dma_address, size, direction);
++ else
++ gnttab_dma_unmap_page(dma_address);
++}
++EXPORT_SYMBOL(dma_unmap_page);
++#endif /* CONFIG_HIGHMEM */
++
++int
++dma_mapping_error(dma_addr_t dma_addr)
++{
++ if (swiotlb)
++ return swiotlb_dma_mapping_error(dma_addr);
++ return 0;
++}
++EXPORT_SYMBOL(dma_mapping_error);
++
++int
++dma_supported(struct device *dev, u64 mask)
++{
++ if (swiotlb)
++ return swiotlb_dma_supported(dev, mask);
++ /*
++ * By default we'll BUG when an infeasible DMA is requested, and
++ * request swiotlb=force (see IOMMU_BUG_ON).
++ */
++ return 1;
++}
++EXPORT_SYMBOL(dma_supported);
++
++void *dma_alloc_coherent(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t gfp)
++{
++ void *ret;
++ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
++ unsigned int order = get_order(size);
++ unsigned long vstart;
++ u64 mask;
++
++ /* ignore region specifiers */
++ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
++
++ if (mem) {
++ int page = bitmap_find_free_region(mem->bitmap, mem->size,
++ order);
++ if (page >= 0) {
++ *dma_handle = mem->device_base + (page << PAGE_SHIFT);
++ ret = mem->virt_base + (page << PAGE_SHIFT);
++ memset(ret, 0, size);
++ return ret;
++ }
++ if (mem->flags & DMA_MEMORY_EXCLUSIVE)
++ return NULL;
++ }
++
++ if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
++ gfp |= GFP_DMA;
++
++ vstart = __get_free_pages(gfp, order);
++ ret = (void *)vstart;
++
++ if (dev != NULL && dev->coherent_dma_mask)
++ mask = dev->coherent_dma_mask;
++ else
++ mask = 0xffffffff;
++
++ if (ret != NULL) {
++ if (xen_create_contiguous_region(vstart, order,
++ fls64(mask)) != 0) {
++ free_pages(vstart, order);
++ return NULL;
++ }
++ memset(ret, 0, size);
++ *dma_handle = virt_to_bus(ret);
++ }
++ return ret;
++}
++EXPORT_SYMBOL(dma_alloc_coherent);
++
++void dma_free_coherent(struct device *dev, size_t size,
++ void *vaddr, dma_addr_t dma_handle)
++{
++ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
++ int order = get_order(size);
++
++ if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
++ int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
++
++ bitmap_release_region(mem->bitmap, page, order);
++ } else {
++ xen_destroy_contiguous_region((unsigned long)vaddr, order);
++ free_pages((unsigned long)vaddr, order);
++ }
++}
++EXPORT_SYMBOL(dma_free_coherent);
++
++#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
++int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
++ dma_addr_t device_addr, size_t size, int flags)
++{
++ void __iomem *mem_base;
++ int pages = size >> PAGE_SHIFT;
++ int bitmap_size = (pages + 31)/32;
++
++ if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
++ goto out;
++ if (!size)
++ goto out;
++ if (dev->dma_mem)
++ goto out;
++
++ /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
++
++ mem_base = ioremap(bus_addr, size);
++ if (!mem_base)
++ goto out;
++
++ dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
++ if (!dev->dma_mem)
++ goto out;
++ memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
++ dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
++ if (!dev->dma_mem->bitmap)
++ goto free1_out;
++ memset(dev->dma_mem->bitmap, 0, bitmap_size);
++
++ dev->dma_mem->virt_base = mem_base;
++ dev->dma_mem->device_base = device_addr;
++ dev->dma_mem->size = pages;
++ dev->dma_mem->flags = flags;
++
++ if (flags & DMA_MEMORY_MAP)
++ return DMA_MEMORY_MAP;
++
++ return DMA_MEMORY_IO;
++
++ free1_out:
++ kfree(dev->dma_mem->bitmap);
++ out:
++ return 0;
++}
++EXPORT_SYMBOL(dma_declare_coherent_memory);
++
++void dma_release_declared_memory(struct device *dev)
++{
++ struct dma_coherent_mem *mem = dev->dma_mem;
++
++ if(!mem)
++ return;
++ dev->dma_mem = NULL;
++ iounmap(mem->virt_base);
++ kfree(mem->bitmap);
++ kfree(mem);
++}
++EXPORT_SYMBOL(dma_release_declared_memory);
++
++void *dma_mark_declared_memory_occupied(struct device *dev,
++ dma_addr_t device_addr, size_t size)
++{
++ struct dma_coherent_mem *mem = dev->dma_mem;
++ int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ int pos, err;
++
++ if (!mem)
++ return ERR_PTR(-EINVAL);
++
++ pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
++ err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
++ if (err != 0)
++ return ERR_PTR(err);
++ return mem->virt_base + (pos << PAGE_SHIFT);
++}
++EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
++#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
++
++dma_addr_t
++dma_map_single(struct device *dev, void *ptr, size_t size,
++ enum dma_data_direction direction)
++{
++ dma_addr_t dma;
++
++ if (direction == DMA_NONE)
++ BUG();
++ WARN_ON(size == 0);
++
++ if (swiotlb) {
++ dma = swiotlb_map_single(dev, ptr, size, direction);
++ } else {
++ dma = gnttab_dma_map_page(virt_to_page(ptr)) +
++ offset_in_page(ptr);
++ IOMMU_BUG_ON(range_straddles_page_boundary(__pa(ptr), size));
++ IOMMU_BUG_ON(address_needs_mapping(dev, dma));
++ }
++
++ flush_write_buffers();
++ return dma;
++}
++EXPORT_SYMBOL(dma_map_single);
++
++void
++dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
++ enum dma_data_direction direction)
++{
++ if (direction == DMA_NONE)
++ BUG();
++ if (swiotlb)
++ swiotlb_unmap_single(dev, dma_addr, size, direction);
++ else
++ gnttab_dma_unmap_page(dma_addr);
++}
++EXPORT_SYMBOL(dma_unmap_single);
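
On the Xen side these helpers replace the usual physical-address arithmetic with grant-table-aware translation (gnttab_dma_map_page()) and fall back to swiotlb bouncing when the swiotlb is active, but from a driver's point of view the calling convention is the ordinary 2.6.18 streaming DMA API. A hedged sketch of a typical caller (device, buffer, and function names are made up for illustration):

#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical example: map one buffer for a device-bound transfer,
 * using only the helpers defined in this file. */
static int example_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(handle))
		return -EIO;

	/* ... hand 'handle' to the hardware and wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
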
++
++void
++dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
++ enum dma_data_direction direction)
++{
++ if (swiotlb)
++ swiotlb_sync_single_for_cpu(dev, dma_handle, size, direction);
++}
++EXPORT_SYMBOL(dma_sync_single_for_cpu);
++
++void
++dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
++ enum dma_data_direction direction)
++{
++ if (swiotlb)
++ swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
++}
++EXPORT_SYMBOL(dma_sync_single_for_device);
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/process-xen.c linux-2.6.18-xen-3.2.0/arch/i386/kernel/process-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/process-xen.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/process-xen.c 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,856 @@
++/*
++ * linux/arch/i386/kernel/process.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ *
++ * Pentium III FXSR, SSE support
++ * Gareth Hughes <gareth@valinux.com>, May 2000
++ */
++
++/*
++ * This file handles the architecture-dependent parts of process handling..
++ */
++
++#include <stdarg.h>
++
++#include <linux/cpu.h>
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/elfcore.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/stddef.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/user.h>
++#include <linux/a.out.h>
++#include <linux/interrupt.h>
++#include <linux/utsname.h>
++#include <linux/delay.h>
++#include <linux/reboot.h>
++#include <linux/init.h>
++#include <linux/mc146818rtc.h>
++#include <linux/module.h>
++#include <linux/kallsyms.h>
++#include <linux/ptrace.h>
++#include <linux/random.h>
++
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/system.h>
++#include <asm/io.h>
++#include <asm/ldt.h>
++#include <asm/processor.h>
++#include <asm/i387.h>
++#include <asm/desc.h>
++#include <asm/vm86.h>
++#ifdef CONFIG_MATH_EMULATION
++#include <asm/math_emu.h>
++#endif
++
++#include <xen/interface/physdev.h>
++#include <xen/interface/vcpu.h>
++#include <xen/cpu_hotplug.h>
++
++#include <linux/err.h>
++
++#include <asm/tlbflush.h>
++#include <asm/cpu.h>
++
++asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
++
++static int hlt_counter;
++
++unsigned long boot_option_idle_override = 0;
++EXPORT_SYMBOL(boot_option_idle_override);
++
++/*
++ * Return saved PC of a blocked thread.
++ */
++unsigned long thread_saved_pc(struct task_struct *tsk)
++{
++ return ((unsigned long *)tsk->thread.esp)[3];
++}
++
++/*
++ * Power management idle function, if any.
++ */
++void (*pm_idle)(void);
++EXPORT_SYMBOL(pm_idle);
++static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
++
++void disable_hlt(void)
++{
++ hlt_counter++;
++}
++
++EXPORT_SYMBOL(disable_hlt);
++
++void enable_hlt(void)
++{
++ hlt_counter--;
++}
++
++EXPORT_SYMBOL(enable_hlt);
++
++/*
++ * On SMP it's slightly faster (but much more power-consuming!)
++ * to poll the ->work.need_resched flag instead of waiting for the
++ * cross-CPU IPI to arrive. Use this option with caution.
++ */
++static void poll_idle (void)
++{
++ local_irq_enable();
++
++ asm volatile(
++ "2:"
++ "testl %0, %1;"
++ "rep; nop;"
++ "je 2b;"
++ : : "i"(_TIF_NEED_RESCHED), "m" (current_thread_info()->flags));
++}
++
++static void xen_idle(void)
++{
++ local_irq_disable();
++
++ if (need_resched())
++ local_irq_enable();
++ else {
++ current_thread_info()->status &= ~TS_POLLING;
++ smp_mb__after_clear_bit();
++ safe_halt();
++ current_thread_info()->status |= TS_POLLING;
++ }
++}
++#ifdef CONFIG_APM_MODULE
++EXPORT_SYMBOL(default_idle);
++#endif
++
++#ifdef CONFIG_HOTPLUG_CPU
++extern cpumask_t cpu_initialized;
++static inline void play_dead(void)
++{
++ idle_task_exit();
++ local_irq_disable();
++ cpu_clear(smp_processor_id(), cpu_initialized);
++ preempt_enable_no_resched();
++ VOID(HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL));
++ cpu_bringup();
++}
++#else
++static inline void play_dead(void)
++{
++ BUG();
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++/*
++ * The idle thread. There's no useful work to be
++ * done, so just try to conserve power and have a
++ * low exit latency (ie sit in a loop waiting for
++ * somebody to say that they'd like to reschedule)
++ */
++void cpu_idle(void)
++{
++ int cpu = smp_processor_id();
++
++ current_thread_info()->status |= TS_POLLING;
++
++ /* endless idle loop with no priority at all */
++ while (1) {
++ while (!need_resched()) {
++ void (*idle)(void);
++
++ if (__get_cpu_var(cpu_idle_state))
++ __get_cpu_var(cpu_idle_state) = 0;
++
++ rmb();
++ idle = xen_idle; /* no alternatives */
++
++ if (cpu_is_offline(cpu))
++ play_dead();
++
++ __get_cpu_var(irq_stat).idle_timestamp = jiffies;
++ idle();
++ }
++ preempt_enable_no_resched();
++ schedule();
++ preempt_disable();
++ }
++}
++
++void cpu_idle_wait(void)
++{
++ unsigned int cpu, this_cpu = get_cpu();
++ cpumask_t map;
++
++ set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
++ put_cpu();
++
++ cpus_clear(map);
++ for_each_online_cpu(cpu) {
++ per_cpu(cpu_idle_state, cpu) = 1;
++ cpu_set(cpu, map);
++ }
++
++ __get_cpu_var(cpu_idle_state) = 0;
++
++ wmb();
++ do {
++ ssleep(1);
++ for_each_online_cpu(cpu) {
++ if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
++ cpu_clear(cpu, map);
++ }
++ cpus_and(map, map, cpu_online_map);
++ } while (!cpus_empty(map));
++}
++EXPORT_SYMBOL_GPL(cpu_idle_wait);
++
++void __devinit select_idle_routine(const struct cpuinfo_x86 *c)
++{
++}
++
++static int __init idle_setup (char *str)
++{
++ if (!strncmp(str, "poll", 4)) {
++ printk("using polling idle threads.\n");
++ pm_idle = poll_idle;
++ }
++
++ boot_option_idle_override = 1;
++ return 1;
++}
++
++__setup("idle=", idle_setup);
++
++void show_regs(struct pt_regs * regs)
++{
++ unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
++
++ printk("\n");
++ printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
++ printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
++ print_symbol("EIP is at %s\n", regs->eip);
++
++ if (user_mode_vm(regs))
++ printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
++ printk(" EFLAGS: %08lx %s (%s %.*s)\n",
++ regs->eflags, print_tainted(), system_utsname.release,
++ (int)strcspn(system_utsname.version, " "),
++ system_utsname.version);
++ printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
++ regs->eax,regs->ebx,regs->ecx,regs->edx);
++ printk("ESI: %08lx EDI: %08lx EBP: %08lx",
++ regs->esi, regs->edi, regs->ebp);
++ printk(" DS: %04x ES: %04x\n",
++ 0xffff & regs->xds,0xffff & regs->xes);
++
++ cr0 = read_cr0();
++ cr2 = read_cr2();
++ cr3 = read_cr3();
++ cr4 = read_cr4_safe();
++ printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
++ show_trace(NULL, regs, &regs->esp);
++}
++
++/*
++ * This gets run with %ebx containing the
++ * function to call, and %edx containing
++ * the "args".
++ */
++extern void kernel_thread_helper(void);
++__asm__(".section .text\n"
++ ".align 4\n"
++ "kernel_thread_helper:\n\t"
++ "movl %edx,%eax\n\t"
++ "pushl %edx\n\t"
++ "call *%ebx\n\t"
++ "pushl %eax\n\t"
++ "call do_exit\n"
++ ".previous");
++
++/*
++ * Create a kernel thread
++ */
++int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
++{
++ struct pt_regs regs;
++
++ memset(&regs, 0, sizeof(regs));
++
++ regs.ebx = (unsigned long) fn;
++ regs.edx = (unsigned long) arg;
++
++ regs.xds = __USER_DS;
++ regs.xes = __USER_DS;
++ regs.orig_eax = -1;
++ regs.eip = (unsigned long) kernel_thread_helper;
++ regs.xcs = GET_KERNEL_CS();
++ regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;
++
++ /* Ok, create the new process.. */
++ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
++}
++EXPORT_SYMBOL(kernel_thread);
++
++/*
++ * Free current thread data structures etc..
++ */
++void exit_thread(void)
++{
++ /* The process may have allocated an io port bitmap... nuke it. */
++ if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
++ struct task_struct *tsk = current;
++ struct thread_struct *t = &tsk->thread;
++ struct physdev_set_iobitmap set_iobitmap;
++ memset(&set_iobitmap, 0, sizeof(set_iobitmap));
++ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap,
++ &set_iobitmap));
++ kfree(t->io_bitmap_ptr);
++ t->io_bitmap_ptr = NULL;
++ clear_thread_flag(TIF_IO_BITMAP);
++ }
++}
++
++void flush_thread(void)
++{
++ struct task_struct *tsk = current;
++
++ memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
++ memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
++ clear_tsk_thread_flag(tsk, TIF_DEBUG);
++ /*
++ * Forget coprocessor state..
++ */
++ clear_fpu(tsk);
++ clear_used_math();
++}
++
++void release_thread(struct task_struct *dead_task)
++{
++ BUG_ON(dead_task->mm);
++ release_vm86_irqs(dead_task);
++}
++
++/*
++ * This gets called before we allocate a new thread and copy
++ * the current task into it.
++ */
++void prepare_to_copy(struct task_struct *tsk)
++{
++ unlazy_fpu(tsk);
++}
++
++int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
++ unsigned long unused,
++ struct task_struct * p, struct pt_regs * regs)
++{
++ struct pt_regs * childregs;
++ struct task_struct *tsk;
++ int err;
++
++ childregs = task_pt_regs(p);
++ *childregs = *regs;
++ childregs->eax = 0;
++ childregs->esp = esp;
++
++ p->thread.esp = (unsigned long) childregs;
++ p->thread.esp0 = (unsigned long) (childregs+1);
++
++ p->thread.eip = (unsigned long) ret_from_fork;
++
++ savesegment(fs,p->thread.fs);
++ savesegment(gs,p->thread.gs);
++
++ tsk = current;
++ if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
++ p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
++ if (!p->thread.io_bitmap_ptr) {
++ p->thread.io_bitmap_max = 0;
++ return -ENOMEM;
++ }
++ memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr,
++ IO_BITMAP_BYTES);
++ set_tsk_thread_flag(p, TIF_IO_BITMAP);
++ }
++
++ /*
++ * Set a new TLS for the child thread?
++ */
++ if (clone_flags & CLONE_SETTLS) {
++ struct desc_struct *desc;
++ struct user_desc info;
++ int idx;
++
++ err = -EFAULT;
++ if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
++ goto out;
++ err = -EINVAL;
++ if (LDT_empty(&info))
++ goto out;
++
++ idx = info.entry_number;
++ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
++ goto out;
++
++ desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
++ desc->a = LDT_entry_a(&info);
++ desc->b = LDT_entry_b(&info);
++ }
++
++ p->thread.iopl = current->thread.iopl;
++
++ err = 0;
++ out:
++ if (err && p->thread.io_bitmap_ptr) {
++ kfree(p->thread.io_bitmap_ptr);
++ p->thread.io_bitmap_max = 0;
++ }
++ return err;
++}
++
++/*
++ * fill in the user structure for a core dump..
++ */
++void dump_thread(struct pt_regs * regs, struct user * dump)
++{
++ int i;
++
++/* changed the size calculations - should hopefully work better. lbt */
++ dump->magic = CMAGIC;
++ dump->start_code = 0;
++ dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
++ dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
++ dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
++ dump->u_dsize -= dump->u_tsize;
++ dump->u_ssize = 0;
++ for (i = 0; i < 8; i++)
++ dump->u_debugreg[i] = current->thread.debugreg[i];
++
++ if (dump->start_stack < TASK_SIZE)
++ dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
++
++ dump->regs.ebx = regs->ebx;
++ dump->regs.ecx = regs->ecx;
++ dump->regs.edx = regs->edx;
++ dump->regs.esi = regs->esi;
++ dump->regs.edi = regs->edi;
++ dump->regs.ebp = regs->ebp;
++ dump->regs.eax = regs->eax;
++ dump->regs.ds = regs->xds;
++ dump->regs.es = regs->xes;
++ savesegment(fs,dump->regs.fs);
++ savesegment(gs,dump->regs.gs);
++ dump->regs.orig_eax = regs->orig_eax;
++ dump->regs.eip = regs->eip;
++ dump->regs.cs = regs->xcs;
++ dump->regs.eflags = regs->eflags;
++ dump->regs.esp = regs->esp;
++ dump->regs.ss = regs->xss;
++
++ dump->u_fpvalid = dump_fpu (regs, &dump->i387);
++}
++EXPORT_SYMBOL(dump_thread);
++
++/*
++ * Capture the user space registers if the task is not running (in user space)
++ */
++int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
++{
++ struct pt_regs ptregs = *task_pt_regs(tsk);
++ ptregs.xcs &= 0xffff;
++ ptregs.xds &= 0xffff;
++ ptregs.xes &= 0xffff;
++ ptregs.xss &= 0xffff;
++
++ elf_core_copy_regs(regs, &ptregs);
++
++ return 1;
++}
++
++static noinline void __switch_to_xtra(struct task_struct *next_p)
++{
++ struct thread_struct *next;
++
++ next = &next_p->thread;
++
++ if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
++ set_debugreg(next->debugreg[0], 0);
++ set_debugreg(next->debugreg[1], 1);
++ set_debugreg(next->debugreg[2], 2);
++ set_debugreg(next->debugreg[3], 3);
++ /* no 4 and 5 */
++ set_debugreg(next->debugreg[6], 6);
++ set_debugreg(next->debugreg[7], 7);
++ }
++}
++
++/*
++ * This function selects if the context switch from prev to next
++ * has to tweak the TSC disable bit in the cr4.
++ */
++static inline void disable_tsc(struct task_struct *prev_p,
++ struct task_struct *next_p)
++{
++ struct thread_info *prev, *next;
++
++ /*
++ * gcc should eliminate the ->thread_info dereference if
++ * has_secure_computing returns 0 at compile time (SECCOMP=n).
++ */
++ prev = task_thread_info(prev_p);
++ next = task_thread_info(next_p);
++
++ if (has_secure_computing(prev) || has_secure_computing(next)) {
++ /* slow path here */
++ if (has_secure_computing(prev) &&
++ !has_secure_computing(next)) {
++ write_cr4(read_cr4() & ~X86_CR4_TSD);
++ } else if (!has_secure_computing(prev) &&
++ has_secure_computing(next))
++ write_cr4(read_cr4() | X86_CR4_TSD);
++ }
++}
++
++/*
++ * switch_to(x,y) should switch tasks from x to y.
++ *
++ * We fsave/fwait so that an exception goes off at the right time
++ * (as a call from the fsave or fwait in effect) rather than to
++ * the wrong process. Lazy FP saving no longer makes any sense
++ * with modern CPUs, and this simplifies a lot of things (SMP
++ * and UP become the same).
++ *
++ * NOTE! We used to use the x86 hardware context switching. The
++ * reason for not using it any more becomes apparent when you
++ * try to recover gracefully from saved state that is no longer
++ * valid (stale segment register values in particular). With the
++ * hardware task-switch, there is no way to fix up bad state in
++ * a reasonable manner.
++ *
++ * The fact that Intel documents the hardware task-switching to
++ * be slow is a fairly red herring - this code is not noticeably
++ * faster. However, there _is_ some room for improvement here,
++ * so the performance issues may eventually be a valid point.
++ * More important, however, is the fact that this allows us much
++ * more flexibility.
++ *
++ * The return value (in %eax) will be the "prev" task after
++ * the task-switch, and shows up in ret_from_fork in entry.S,
++ * for example.
++ */
++struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
++{
++ struct thread_struct *prev = &prev_p->thread,
++ *next = &next_p->thread;
++ int cpu = smp_processor_id();
++#ifndef CONFIG_X86_NO_TSS
++ struct tss_struct *tss = &per_cpu(init_tss, cpu);
++#endif
++ struct physdev_set_iopl iopl_op;
++ struct physdev_set_iobitmap iobmp_op;
++ multicall_entry_t _mcl[8], *mcl = _mcl;
++
++ /* XEN NOTE: FS/GS saved in switch_mm(), not here. */
++
++ /*
++ * This is basically '__unlazy_fpu', except that we queue a
++ * multicall to indicate FPU task switch, rather than
++ * synchronously trapping to Xen.
++ */
++ if (prev_p->thread_info->status & TS_USEDFPU) {
++ __save_init_fpu(prev_p); /* _not_ save_init_fpu() */
++ mcl->op = __HYPERVISOR_fpu_taskswitch;
++ mcl->args[0] = 1;
++ mcl++;
++ }
++#if 0 /* lazy fpu sanity check */
++ else BUG_ON(!(read_cr0() & 8));
++#endif
++
++ /*
++ * Reload esp0.
++ * This is load_esp0(tss, next) with a multicall.
++ */
++ mcl->op = __HYPERVISOR_stack_switch;
++ mcl->args[0] = __KERNEL_DS;
++ mcl->args[1] = next->esp0;
++ mcl++;
++
++ /*
++ * Load the per-thread Thread-Local Storage descriptor.
++ * This is load_TLS(next, cpu) with multicalls.
++ */
++#define C(i) do { \
++ if (unlikely(next->tls_array[i].a != prev->tls_array[i].a || \
++ next->tls_array[i].b != prev->tls_array[i].b)) { \
++ mcl->op = __HYPERVISOR_update_descriptor; \
++ *(u64 *)&mcl->args[0] = virt_to_machine( \
++ &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]);\
++ *(u64 *)&mcl->args[2] = *(u64 *)&next->tls_array[i]; \
++ mcl++; \
++ } \
++} while (0)
++ C(0); C(1); C(2);
++#undef C
++
++ if (unlikely(prev->iopl != next->iopl)) {
++ iopl_op.iopl = (next->iopl == 0) ? 1 : (next->iopl >> 12) & 3;
++ mcl->op = __HYPERVISOR_physdev_op;
++ mcl->args[0] = PHYSDEVOP_set_iopl;
++ mcl->args[1] = (unsigned long)&iopl_op;
++ mcl++;
++ }
++
++ if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
++ set_xen_guest_handle(iobmp_op.bitmap,
++ (char *)next->io_bitmap_ptr);
++ iobmp_op.nr_ports = next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
++ mcl->op = __HYPERVISOR_physdev_op;
++ mcl->args[0] = PHYSDEVOP_set_iobitmap;
++ mcl->args[1] = (unsigned long)&iobmp_op;
++ mcl++;
++ }
++
++ BUG_ON(mcl > _mcl + ARRAY_SIZE(_mcl));
++ if (unlikely(HYPERVISOR_multicall_check(_mcl, mcl - _mcl, NULL)))
++ BUG();
++
++ /*
++ * Restore %fs and %gs if needed.
++ *
++ * Glibc normally makes %fs be zero, and %gs is one of
++ * the TLS segments.
++ */
++ if (unlikely(next->fs))
++ loadsegment(fs, next->fs);
++
++ if (next->gs)
++ loadsegment(gs, next->gs);
++
++ /*
++ * Now maybe handle debug registers
++ */
++ if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW))
++ __switch_to_xtra(next_p);
++
++ disable_tsc(prev_p, next_p);
++
++ return prev_p;
++}
++
++asmlinkage int sys_fork(struct pt_regs regs)
++{
++ return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
++}
++
++asmlinkage int sys_clone(struct pt_regs regs)
++{
++ unsigned long clone_flags;
++ unsigned long newsp;
++ int __user *parent_tidptr, *child_tidptr;
++
++ clone_flags = regs.ebx;
++ newsp = regs.ecx;
++ parent_tidptr = (int __user *)regs.edx;
++ child_tidptr = (int __user *)regs.edi;
++ if (!newsp)
++ newsp = regs.esp;
++ return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
++}
++
++/*
++ * This is trivial, and on the face of it looks like it
++ * could equally well be done in user mode.
++ *
++ * Not so, for quite unobvious reasons - register pressure.
++ * In user mode vfork() cannot have a stack frame, and if
++ * done by calling the "clone()" system call directly, you
++ * do not have enough call-clobbered registers to hold all
++ * the information you need.
++ */
++asmlinkage int sys_vfork(struct pt_regs regs)
++{
++ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
++}
++
++/*
++ * sys_execve() executes a new program.
++ */
++asmlinkage int sys_execve(struct pt_regs regs)
++{
++ int error;
++ char * filename;
++
++ filename = getname((char __user *) regs.ebx);
++ error = PTR_ERR(filename);
++ if (IS_ERR(filename))
++ goto out;
++ error = do_execve(filename,
++ (char __user * __user *) regs.ecx,
++ (char __user * __user *) regs.edx,
++ &regs);
++ if (error == 0) {
++ task_lock(current);
++ current->ptrace &= ~PT_DTRACE;
++ task_unlock(current);
++ /* Make sure we don't return using sysenter.. */
++ set_thread_flag(TIF_IRET);
++ }
++ putname(filename);
++out:
++ return error;
++}
++
++#define top_esp (THREAD_SIZE - sizeof(unsigned long))
++#define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long))
++
++unsigned long get_wchan(struct task_struct *p)
++{
++ unsigned long ebp, esp, eip;
++ unsigned long stack_page;
++ int count = 0;
++ if (!p || p == current || p->state == TASK_RUNNING)
++ return 0;
++ stack_page = (unsigned long)task_stack_page(p);
++ esp = p->thread.esp;
++ if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
++ return 0;
++ /* include/asm-i386/system.h:switch_to() pushes ebp last. */
++ ebp = *(unsigned long *) esp;
++ do {
++ if (ebp < stack_page || ebp > top_ebp+stack_page)
++ return 0;
++ eip = *(unsigned long *) (ebp+4);
++ if (!in_sched_functions(eip))
++ return eip;
++ ebp = *(unsigned long *) ebp;
++ } while (count++ < 16);
++ return 0;
++}
++
++/*
++ * sys_alloc_thread_area: get a yet unused TLS descriptor index.
++ */
++static int get_free_idx(void)
++{
++ struct thread_struct *t = &current->thread;
++ int idx;
++
++ for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
++ if (desc_empty(t->tls_array + idx))
++ return idx + GDT_ENTRY_TLS_MIN;
++ return -ESRCH;
++}
++
++/*
++ * Set a given TLS descriptor:
++ */
++asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
++{
++ struct thread_struct *t = &current->thread;
++ struct user_desc info;
++ struct desc_struct *desc;
++ int cpu, idx;
++
++ if (copy_from_user(&info, u_info, sizeof(info)))
++ return -EFAULT;
++ idx = info.entry_number;
++
++ /*
++ * index -1 means the kernel should try to find and
++ * allocate an empty descriptor:
++ */
++ if (idx == -1) {
++ idx = get_free_idx();
++ if (idx < 0)
++ return idx;
++ if (put_user(idx, &u_info->entry_number))
++ return -EFAULT;
++ }
++
++ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
++ return -EINVAL;
++
++ desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
++
++ /*
++ * We must not get preempted while modifying the TLS.
++ */
++ cpu = get_cpu();
++
++ if (LDT_empty(&info)) {
++ desc->a = 0;
++ desc->b = 0;
++ } else {
++ desc->a = LDT_entry_a(&info);
++ desc->b = LDT_entry_b(&info);
++ }
++ load_TLS(t, cpu);
++
++ put_cpu();
++
++ return 0;
++}
++
++/*
++ * Get the current Thread-Local Storage area:
++ */
++
++#define GET_BASE(desc) ( \
++ (((desc)->a >> 16) & 0x0000ffff) | \
++ (((desc)->b << 16) & 0x00ff0000) | \
++ ( (desc)->b & 0xff000000) )
++
++#define GET_LIMIT(desc) ( \
++ ((desc)->a & 0x0ffff) | \
++ ((desc)->b & 0xf0000) )
++
++#define GET_32BIT(desc) (((desc)->b >> 22) & 1)
++#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
++#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
++#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
++#define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
++#define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
++
++asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
++{
++ struct user_desc info;
++ struct desc_struct *desc;
++ int idx;
++
++ if (get_user(idx, &u_info->entry_number))
++ return -EFAULT;
++ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
++ return -EINVAL;
++
++ memset(&info, 0, sizeof(info));
++
++ desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
++
++ info.entry_number = idx;
++ info.base_addr = GET_BASE(desc);
++ info.limit = GET_LIMIT(desc);
++ info.seg_32bit = GET_32BIT(desc);
++ info.contents = GET_CONTENTS(desc);
++ info.read_exec_only = !GET_WRITABLE(desc);
++ info.limit_in_pages = GET_LIMIT_PAGES(desc);
++ info.seg_not_present = !GET_PRESENT(desc);
++ info.useable = GET_USEABLE(desc);
++
++ if (copy_to_user(u_info, &info, sizeof(info)))
++ return -EFAULT;
++ return 0;
++}
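
The GET_* macros above unpack a TLS descriptor from the two 32-bit words the GDT stores (a = limit low / base low, b = base middle and high plus the flag bits). As a worked example using assumed values rather than anything from this file, the flat 4GB code descriptor 0x00cf9a000000ffff (the same quad used by the relocate_kernel GDT later in this patch) splits into a = 0x0000ffff and b = 0x00cf9a00, for which the same bit arithmetic yields base 0, limit 0xfffff, page granularity, 32-bit, present:

#include <stdio.h>
#include <stdint.h>

/* Standalone illustration of the decoding done by GET_BASE,
 * GET_LIMIT and friends; the descriptor value is an example. */
int main(void)
{
	uint32_t a = 0x0000ffff, b = 0x00cf9a00;

	uint32_t base  = ((a >> 16) & 0x0000ffff) |
			 ((b << 16) & 0x00ff0000) |
			 ( b        & 0xff000000);
	uint32_t limit = (a & 0x0ffff) | (b & 0xf0000);

	/* prints base=0 limit=0xfffff pages=1 32bit=1 present=1 */
	printf("base=%#x limit=%#x pages=%u 32bit=%u present=%u\n",
	       base, limit, (b >> 23) & 1, (b >> 22) & 1, (b >> 15) & 1);
	return 0;
}
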
++
++unsigned long arch_align_stack(unsigned long sp)
++{
++ if (randomize_va_space)
++ sp -= get_random_int() % 8192;
++ return sp & ~0xf;
++}
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/quirks-xen.c linux-2.6.18-xen-3.2.0/arch/i386/kernel/quirks-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/quirks-xen.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/quirks-xen.c 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,47 @@
++/*
++ * This file contains work-arounds for x86 and x86_64 platform bugs.
++ */
++#include <linux/pci.h>
++#include <linux/irq.h>
++
++#if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_SMP) || defined(CONFIG_XEN)) && defined(CONFIG_PCI)
++
++static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
++{
++ u8 config, rev;
++ u32 word;
++
++ /* BIOS may enable hardware IRQ balancing for
++	 * E7520/E7320/E7525 (revision ID 0x9 and below)
++ * based platforms.
++ * Disable SW irqbalance/affinity on those platforms.
++ */
++ pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
++ if (rev > 0x9)
++ return;
++
++ printk(KERN_INFO "Intel E7520/7320/7525 detected.");
++
++ /* enable access to config space*/
++ pci_read_config_byte(dev, 0xf4, &config);
++ pci_write_config_byte(dev, 0xf4, config|0x2);
++
++ /* read xTPR register */
++ raw_pci_ops->read(0, 0, 0x40, 0x4c, 2, &word);
++
++ if (!(word & (1 << 13))) {
++ struct xen_platform_op op;
++ printk(KERN_INFO "Disabling irq balancing and affinity\n");
++ op.cmd = XENPF_platform_quirk;
++ op.u.platform_quirk.quirk_id = QUIRK_NOIRQBALANCING;
++ WARN_ON(HYPERVISOR_platform_op(&op));
++ }
++
++ /* put back the original value for config space*/
++ if (!(config & 0x2))
++ pci_write_config_byte(dev, 0xf4, config);
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_intel_irqbalance);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_intel_irqbalance);
++#endif
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/relocate_kernel.S linux-2.6.18-xen-3.2.0/arch/i386/kernel/relocate_kernel.S
+--- linux-2.6.18.8/arch/i386/kernel/relocate_kernel.S 2007-02-23 15:52:30.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/relocate_kernel.S 2008-02-15 16:21:49.000000000 -0800
+@@ -7,16 +7,138 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/page.h>
++#include <asm/kexec.h>
++
++/*
++ * Must be relocatable PIC code callable as a C function
++ */
++
++#define PTR(x) (x << 2)
++#define PAGE_ALIGNED (1 << PAGE_SHIFT)
++#define PAGE_ATTR 0x63 /* _PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY */
++#define PAE_PGD_ATTR 0x01 /* _PAGE_PRESENT */
++
++ .text
++ .align PAGE_ALIGNED
++ .globl relocate_kernel
++relocate_kernel:
++ movl 8(%esp), %ebp /* list of pages */
++
++#ifdef CONFIG_X86_PAE
++ /* map the control page at its virtual address */
++
++ movl PTR(VA_PGD)(%ebp), %edi
++ movl PTR(VA_CONTROL_PAGE)(%ebp), %eax
++ andl $0xc0000000, %eax
++ shrl $27, %eax
++ addl %edi, %eax
++
++ movl PTR(PA_PMD_0)(%ebp), %edx
++ orl $PAE_PGD_ATTR, %edx
++ movl %edx, (%eax)
++
++ movl PTR(VA_PMD_0)(%ebp), %edi
++ movl PTR(VA_CONTROL_PAGE)(%ebp), %eax
++ andl $0x3fe00000, %eax
++ shrl $18, %eax
++ addl %edi, %eax
++
++ movl PTR(PA_PTE_0)(%ebp), %edx
++ orl $PAGE_ATTR, %edx
++ movl %edx, (%eax)
++
++ movl PTR(VA_PTE_0)(%ebp), %edi
++ movl PTR(VA_CONTROL_PAGE)(%ebp), %eax
++ andl $0x001ff000, %eax
++ shrl $9, %eax
++ addl %edi, %eax
++
++ movl PTR(PA_CONTROL_PAGE)(%ebp), %edx
++ orl $PAGE_ATTR, %edx
++ movl %edx, (%eax)
++
++ /* identity map the control page at its physical address */
++
++ movl PTR(VA_PGD)(%ebp), %edi
++ movl PTR(PA_CONTROL_PAGE)(%ebp), %eax
++ andl $0xc0000000, %eax
++ shrl $27, %eax
++ addl %edi, %eax
++
++ movl PTR(PA_PMD_1)(%ebp), %edx
++ orl $PAE_PGD_ATTR, %edx
++ movl %edx, (%eax)
++
++ movl PTR(VA_PMD_1)(%ebp), %edi
++ movl PTR(PA_CONTROL_PAGE)(%ebp), %eax
++ andl $0x3fe00000, %eax
++ shrl $18, %eax
++ addl %edi, %eax
++
++ movl PTR(PA_PTE_1)(%ebp), %edx
++ orl $PAGE_ATTR, %edx
++ movl %edx, (%eax)
++
++ movl PTR(VA_PTE_1)(%ebp), %edi
++ movl PTR(PA_CONTROL_PAGE)(%ebp), %eax
++ andl $0x001ff000, %eax
++ shrl $9, %eax
++ addl %edi, %eax
++
++ movl PTR(PA_CONTROL_PAGE)(%ebp), %edx
++ orl $PAGE_ATTR, %edx
++ movl %edx, (%eax)
++#else
++ /* map the control page at its virtual address */
++
++ movl PTR(VA_PGD)(%ebp), %edi
++ movl PTR(VA_CONTROL_PAGE)(%ebp), %eax
++ andl $0xffc00000, %eax
++ shrl $20, %eax
++ addl %edi, %eax
++
++ movl PTR(PA_PTE_0)(%ebp), %edx
++ orl $PAGE_ATTR, %edx
++ movl %edx, (%eax)
++
++ movl PTR(VA_PTE_0)(%ebp), %edi
++ movl PTR(VA_CONTROL_PAGE)(%ebp), %eax
++ andl $0x003ff000, %eax
++ shrl $10, %eax
++ addl %edi, %eax
++
++ movl PTR(PA_CONTROL_PAGE)(%ebp), %edx
++ orl $PAGE_ATTR, %edx
++ movl %edx, (%eax)
++
++ /* identity map the control page at its physical address */
++
++ movl PTR(VA_PGD)(%ebp), %edi
++ movl PTR(PA_CONTROL_PAGE)(%ebp), %eax
++ andl $0xffc00000, %eax
++ shrl $20, %eax
++ addl %edi, %eax
++
++ movl PTR(PA_PTE_1)(%ebp), %edx
++ orl $PAGE_ATTR, %edx
++ movl %edx, (%eax)
++
++ movl PTR(VA_PTE_1)(%ebp), %edi
++ movl PTR(PA_CONTROL_PAGE)(%ebp), %eax
++ andl $0x003ff000, %eax
++ shrl $10, %eax
++ addl %edi, %eax
++
++ movl PTR(PA_CONTROL_PAGE)(%ebp), %edx
++ orl $PAGE_ATTR, %edx
++ movl %edx, (%eax)
++#endif
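
Both page-table setups above are open-coded index calculations: each mask isolates one paging level's index bits from the virtual address, and the shift multiplies that index by the entry size (8 bytes with PAE, 4 bytes without). Restated in C purely for readability (helper names assumed; the top PAE level is the 4-entry PDPT, which this code addresses through VA_PGD/PA_PGD):

#include <stdint.h>

/* PAE: PDPT index from bits 31-30, PMD index from bits 29-21,
 * PTE index from bits 20-12; entries are 8 bytes each. */
static uint32_t pae_pgd_off(uint32_t va) { return (va & 0xc0000000) >> 27; }
static uint32_t pae_pmd_off(uint32_t va) { return (va & 0x3fe00000) >> 18; }
static uint32_t pae_pte_off(uint32_t va) { return (va & 0x001ff000) >> 9;  }

/* Classic 2-level layout: PGD index from bits 31-22, PTE index
 * from bits 21-12; entries are 4 bytes each. */
static uint32_t pgd_off(uint32_t va) { return (va & 0xffc00000) >> 20; }
static uint32_t pte_off(uint32_t va) { return (va & 0x003ff000) >> 10; }
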
+
+- /*
+- * Must be relocatable PIC code callable as a C function, that once
+- * it starts can not use the previous processes stack.
+- */
+- .globl relocate_new_kernel
+ relocate_new_kernel:
+ /* read the arguments and say goodbye to the stack */
+ movl 4(%esp), %ebx /* page_list */
+- movl 8(%esp), %ebp /* reboot_code_buffer */
++ movl 8(%esp), %ebp /* list of pages */
+ movl 12(%esp), %edx /* start address */
+ movl 16(%esp), %ecx /* cpu_has_pae */
+
+@@ -24,11 +146,57 @@ relocate_new_kernel:
+ pushl $0
+ popfl
+
+- /* set a new stack at the bottom of our page... */
+- lea 4096(%ebp), %esp
++ /* get physical address of control page now */
++ /* this is impossible after page table switch */
++ movl PTR(PA_CONTROL_PAGE)(%ebp), %edi
+
+- /* store the parameters back on the stack */
+- pushl %edx /* store the start address */
++ /* switch to new set of page tables */
++ movl PTR(PA_PGD)(%ebp), %eax
++ movl %eax, %cr3
++
++ /* setup idt */
++ movl %edi, %eax
++ addl $(idt_48 - relocate_kernel), %eax
++ lidtl (%eax)
++
++ /* setup gdt */
++ movl %edi, %eax
++ addl $(gdt - relocate_kernel), %eax
++ movl %edi, %esi
++ addl $((gdt_48 - relocate_kernel) + 2), %esi
++ movl %eax, (%esi)
++
++ movl %edi, %eax
++ addl $(gdt_48 - relocate_kernel), %eax
++ lgdtl (%eax)
++
++ /* setup data segment registers */
++ mov $(gdt_ds - gdt), %eax
++ mov %eax, %ds
++ mov %eax, %es
++ mov %eax, %fs
++ mov %eax, %gs
++ mov %eax, %ss
++
++ /* setup a new stack at the end of the physical control page */
++ lea 4096(%edi), %esp
++
++ /* load new code segment and jump to identity mapped page */
++ movl %edi, %esi
++ xorl %eax, %eax
++ pushl %eax
++ pushl %esi
++ pushl %eax
++ movl $(gdt_cs - gdt), %eax
++ pushl %eax
++ movl %edi, %eax
++ addl $(identity_mapped - relocate_kernel),%eax
++ pushl %eax
++ iretl
++
++identity_mapped:
++ /* store the start address on the stack */
++ pushl %edx
+
+ /* Set cr0 to a known state:
+ * 31 0 == Paging disabled
+@@ -113,8 +281,20 @@ relocate_new_kernel:
+ xorl %edi, %edi
+ xorl %ebp, %ebp
+ ret
+-relocate_new_kernel_end:
+
+- .globl relocate_new_kernel_size
+-relocate_new_kernel_size:
+- .long relocate_new_kernel_end - relocate_new_kernel
++ .align 16
++gdt:
++ .quad 0x0000000000000000 /* NULL descriptor */
++gdt_cs:
++ .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
++gdt_ds:
++ .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
++gdt_end:
++
++gdt_48:
++ .word gdt_end - gdt - 1 /* limit */
++ .long 0 /* base - filled in by code above */
++
++idt_48:
++ .word 0 /* limit */
++ .long 0 /* base */
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/setup-xen.c linux-2.6.18-xen-3.2.0/arch/i386/kernel/setup-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/setup-xen.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/setup-xen.c 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,1919 @@
++/*
++ * linux/arch/i386/kernel/setup.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ *
++ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
++ *
++ * Memory region support
++ * David Parsons <orc@pell.chi.il.us>, July-August 1999
++ *
++ * Added E820 sanitization routine (removes overlapping memory regions);
++ * Brian Moyle <bmoyle@mvista.com>, February 2001
++ *
++ * Moved CPU detection code to cpu/${cpu}.c
++ * Patrick Mochel <mochel@osdl.org>, March 2002
++ *
++ * Provisions for empty E820 memory regions (reported by certain BIOSes).
++ * Alex Achenbach <xela@slit.de>, December 2002.
++ *
++ */
++
++/*
++ * This file handles the architecture-dependent parts of initialization
++ */
++
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/mmzone.h>
++#include <linux/screen_info.h>
++#include <linux/ioport.h>
++#include <linux/acpi.h>
++#include <linux/apm_bios.h>
++#include <linux/initrd.h>
++#include <linux/bootmem.h>
++#include <linux/seq_file.h>
++#include <linux/platform_device.h>
++#include <linux/console.h>
++#include <linux/mca.h>
++#include <linux/root_dev.h>
++#include <linux/highmem.h>
++#include <linux/module.h>
++#include <linux/efi.h>
++#include <linux/init.h>
++#include <linux/edd.h>
++#include <linux/nodemask.h>
++#include <linux/kernel.h>
++#include <linux/percpu.h>
++#include <linux/notifier.h>
++#include <linux/kexec.h>
++#include <linux/crash_dump.h>
++#include <linux/dmi.h>
++#include <linux/pfn.h>
++
++#include <video/edid.h>
++
++#include <asm/apic.h>
++#include <asm/e820.h>
++#include <asm/mpspec.h>
++#include <asm/setup.h>
++#include <asm/arch_hooks.h>
++#include <asm/sections.h>
++#include <asm/io_apic.h>
++#include <asm/ist.h>
++#include <asm/io.h>
++#include <asm/hypervisor.h>
++#include <xen/interface/physdev.h>
++#include <xen/interface/memory.h>
++#include <xen/features.h>
++#include <xen/firmware.h>
++#include <xen/xencons.h>
++#include <setup_arch.h>
++#include <bios_ebda.h>
++
++#ifdef CONFIG_XEN
++#include <xen/interface/kexec.h>
++#endif
++
++/* Forward Declaration. */
++void __init find_max_pfn(void);
++
++static int xen_panic_event(struct notifier_block *, unsigned long, void *);
++static struct notifier_block xen_panic_block = {
++ xen_panic_event, NULL, 0 /* try to go last */
++};
++
++extern char hypercall_page[PAGE_SIZE];
++EXPORT_SYMBOL(hypercall_page);
++
++int disable_pse __devinitdata = 0;
++
++/*
++ * Machine setup..
++ */
++
++#ifdef CONFIG_EFI
++int efi_enabled = 0;
++EXPORT_SYMBOL(efi_enabled);
++#endif
++
++/* cpu data as detected by the assembly code in head.S */
++struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
++/* common cpu data for all cpus */
++struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
++EXPORT_SYMBOL(boot_cpu_data);
++
++unsigned long mmu_cr4_features;
++
++#ifdef CONFIG_ACPI
++ int acpi_disabled = 0;
++#else
++ int acpi_disabled = 1;
++#endif
++EXPORT_SYMBOL(acpi_disabled);
++
++#ifdef CONFIG_ACPI
++int __initdata acpi_force = 0;
++extern acpi_interrupt_flags acpi_sci_flags;
++#endif
++
++/* for MCA, but anyone else can use it if they want */
++unsigned int machine_id;
++#ifdef CONFIG_MCA
++EXPORT_SYMBOL(machine_id);
++#endif
++unsigned int machine_submodel_id;
++unsigned int BIOS_revision;
++unsigned int mca_pentium_flag;
++
++/* For PCI or other memory-mapped resources */
++unsigned long pci_mem_start = 0x10000000;
++#ifdef CONFIG_PCI
++EXPORT_SYMBOL(pci_mem_start);
++#endif
++
++/* Boot loader ID as an integer, for the benefit of proc_dointvec */
++int bootloader_type;
++
++/* user-defined highmem size */
++static unsigned int highmem_pages = -1;
++
++/*
++ * Setup options
++ */
++struct drive_info_struct { char dummy[32]; } drive_info;
++#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || \
++ defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
++EXPORT_SYMBOL(drive_info);
++#endif
++struct screen_info screen_info;
++EXPORT_SYMBOL(screen_info);
++struct apm_info apm_info;
++EXPORT_SYMBOL(apm_info);
++struct sys_desc_table_struct {
++ unsigned short length;
++ unsigned char table[0];
++};
++struct edid_info edid_info;
++EXPORT_SYMBOL_GPL(edid_info);
++#ifndef CONFIG_XEN
++#define copy_edid() (edid_info = EDID_INFO)
++#endif
++struct ist_info ist_info;
++#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
++ defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
++EXPORT_SYMBOL(ist_info);
++#endif
++struct e820map e820;
++#ifdef CONFIG_XEN
++struct e820map machine_e820;
++#endif
++
++extern void early_cpu_init(void);
++extern void generic_apic_probe(char *);
++extern int root_mountflags;
++
++unsigned long saved_videomode;
++
++#define RAMDISK_IMAGE_START_MASK 0x07FF
++#define RAMDISK_PROMPT_FLAG 0x8000
++#define RAMDISK_LOAD_FLAG 0x4000
++
++static char command_line[COMMAND_LINE_SIZE];
++
++unsigned char __initdata boot_params[PARAM_SIZE];
++
++static struct resource data_resource = {
++ .name = "Kernel data",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
++};
++
++static struct resource code_resource = {
++ .name = "Kernel code",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
++};
++
++static struct resource system_rom_resource = {
++ .name = "System ROM",
++ .start = 0xf0000,
++ .end = 0xfffff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++};
++
++static struct resource extension_rom_resource = {
++ .name = "Extension ROM",
++ .start = 0xe0000,
++ .end = 0xeffff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++};
++
++static struct resource adapter_rom_resources[] = { {
++ .name = "Adapter ROM",
++ .start = 0xc8000,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++} };
++
++#define ADAPTER_ROM_RESOURCES \
++ (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
++
++static struct resource video_rom_resource = {
++ .name = "Video ROM",
++ .start = 0xc0000,
++ .end = 0xc7fff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++};
++
++static struct resource video_ram_resource = {
++ .name = "Video RAM area",
++ .start = 0xa0000,
++ .end = 0xbffff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
++};
++
++static struct resource standard_io_resources[] = { {
++ .name = "dma1",
++ .start = 0x0000,
++ .end = 0x001f,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "pic1",
++ .start = 0x0020,
++ .end = 0x0021,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "timer0",
++ .start = 0x0040,
++ .end = 0x0043,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "timer1",
++ .start = 0x0050,
++ .end = 0x0053,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "keyboard",
++ .start = 0x0060,
++ .end = 0x006f,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "dma page reg",
++ .start = 0x0080,
++ .end = 0x008f,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "pic2",
++ .start = 0x00a0,
++ .end = 0x00a1,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "dma2",
++ .start = 0x00c0,
++ .end = 0x00df,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "fpu",
++ .start = 0x00f0,
++ .end = 0x00ff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++} };
++
++#define STANDARD_IO_RESOURCES \
++ (sizeof standard_io_resources / sizeof standard_io_resources[0])
++
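++/* A valid option ROM begins with the 0xaa55 signature word. */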
++#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
++
++static int __init romchecksum(unsigned char *rom, unsigned long length)
++{
++ unsigned char *p, sum = 0;
++
++ for (p = rom; p < rom + length; p++)
++ sum += *p;
++ return sum == 0;
++}
++
++static void __init probe_roms(void)
++{
++ unsigned long start, length, upper;
++ unsigned char *rom;
++ int i;
++
++#ifdef CONFIG_XEN
++ /* Nothing to do if not running in dom0. */
++ if (!is_initial_xendomain())
++ return;
++#endif
++
++ /* video rom */
++ upper = adapter_rom_resources[0].start;
++ for (start = video_rom_resource.start; start < upper; start += 2048) {
++ rom = isa_bus_to_virt(start);
++ if (!romsignature(rom))
++ continue;
++
++ video_rom_resource.start = start;
++
++ /* 0 < length <= 0x7f * 512, historically */
++ length = rom[2] * 512;
++
++ /* if checksum okay, trust length byte */
++ if (length && romchecksum(rom, length))
++ video_rom_resource.end = start + length - 1;
++
++ request_resource(&iomem_resource, &video_rom_resource);
++ break;
++ }
++
++ start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
++ if (start < upper)
++ start = upper;
++
++ /* system rom */
++ request_resource(&iomem_resource, &system_rom_resource);
++ upper = system_rom_resource.start;
++
++ /* check for extension rom (ignore length byte!) */
++ rom = isa_bus_to_virt(extension_rom_resource.start);
++ if (romsignature(rom)) {
++ length = extension_rom_resource.end - extension_rom_resource.start + 1;
++ if (romchecksum(rom, length)) {
++ request_resource(&iomem_resource, &extension_rom_resource);
++ upper = extension_rom_resource.start;
++ }
++ }
++
++ /* check for adapter roms on 2k boundaries */
++ for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
++ rom = isa_bus_to_virt(start);
++ if (!romsignature(rom))
++ continue;
++
++ /* 0 < length <= 0x7f * 512, historically */
++ length = rom[2] * 512;
++
++ /* but accept any length that fits if checksum okay */
++ if (!length || start + length > upper || !romchecksum(rom, length))
++ continue;
++
++ adapter_rom_resources[i].start = start;
++ adapter_rom_resources[i].end = start + length - 1;
++ request_resource(&iomem_resource, &adapter_rom_resources[i]);
++
++ start = adapter_rom_resources[i++].end & ~2047UL;
++ }
++}
++
++/*
++ * Point at the empty zero page to start with. We map the real shared_info
++ * page as soon as fixmap is up and running.
++ */
++shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
++EXPORT_SYMBOL(HYPERVISOR_shared_info);
++
++unsigned long *phys_to_machine_mapping;
++unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[16];
++EXPORT_SYMBOL(phys_to_machine_mapping);
++
++/* Raw start-of-day parameters from the hypervisor. */
++start_info_t *xen_start_info;
++EXPORT_SYMBOL(xen_start_info);
++
++void __init add_memory_region(unsigned long long start,
++ unsigned long long size, int type)
++{
++ int x;
++
++ if (!efi_enabled) {
++ x = e820.nr_map;
++
++ if (x == E820MAX) {
++ printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
++ return;
++ }
++
++ e820.map[x].addr = start;
++ e820.map[x].size = size;
++ e820.map[x].type = type;
++ e820.nr_map++;
++ }
++} /* add_memory_region */
++
++static void __init limit_regions(unsigned long long size)
++{
++ unsigned long long current_addr = 0;
++ int i;
++
++ if (efi_enabled) {
++ efi_memory_desc_t *md;
++ void *p;
++
++ for (p = memmap.map, i = 0; p < memmap.map_end;
++ p += memmap.desc_size, i++) {
++ md = p;
++ current_addr = md->phys_addr + (md->num_pages << 12);
++ if (md->type == EFI_CONVENTIONAL_MEMORY) {
++ if (current_addr >= size) {
++ md->num_pages -=
++ (((current_addr-size) + PAGE_SIZE-1) >> PAGE_SHIFT);
++ memmap.nr_map = i + 1;
++ return;
++ }
++ }
++ }
++ }
++ for (i = 0; i < e820.nr_map; i++) {
++ current_addr = e820.map[i].addr + e820.map[i].size;
++ if (current_addr < size)
++ continue;
++
++ if (e820.map[i].type != E820_RAM)
++ continue;
++
++ if (e820.map[i].addr >= size) {
++ /*
++ * This region starts past the end of the
++ * requested size, skip it completely.
++ */
++ e820.nr_map = i;
++ } else {
++ e820.nr_map = i + 1;
++ e820.map[i].size -= current_addr - size;
++ }
++ return;
++ }
++#ifdef CONFIG_XEN
++ if (i==e820.nr_map && current_addr < size) {
++ /*
++ * The e820 map finished before our requested size so
++ * extend the final entry to the requested address.
++ */
++ --i;
++ if (e820.map[i].type == E820_RAM)
++ e820.map[i].size -= current_addr - size;
++ else
++ add_memory_region(current_addr, size - current_addr, E820_RAM);
++ }
++#endif
++}
++
++#define E820_DEBUG 1
++
++static void __init print_memory_map(char *who)
++{
++ int i;
++
++ for (i = 0; i < e820.nr_map; i++) {
++ printk(" %s: %016Lx - %016Lx ", who,
++ e820.map[i].addr,
++ e820.map[i].addr + e820.map[i].size);
++ switch (e820.map[i].type) {
++ case E820_RAM: printk("(usable)\n");
++ break;
++ case E820_RESERVED:
++ printk("(reserved)\n");
++ break;
++ case E820_ACPI:
++ printk("(ACPI data)\n");
++ break;
++ case E820_NVS:
++ printk("(ACPI NVS)\n");
++ break;
++ default: printk("type %lu\n", e820.map[i].type);
++ break;
++ }
++ }
++}
++
++/*
++ * Sanitize the BIOS e820 map.
++ *
++ * Some e820 responses include overlapping entries. The following
++ * replaces the original e820 map with a new one, removing overlaps.
++ *
++ */
++struct change_member {
++ struct e820entry *pbios; /* pointer to original bios entry */
++ unsigned long long addr; /* address for this change point */
++};
++static struct change_member change_point_list[2*E820MAX] __initdata;
++static struct change_member *change_point[2*E820MAX] __initdata;
++static struct e820entry *overlap_list[E820MAX] __initdata;
++static struct e820entry new_bios[E820MAX] __initdata;
++
++int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
++{
++ struct change_member *change_tmp;
++ unsigned long current_type, last_type;
++ unsigned long long last_addr;
++ int chgidx, still_changing;
++ int overlap_entries;
++ int new_bios_entry;
++ int old_nr, new_nr, chg_nr;
++ int i;
++
++ /*
++ Visually we're performing the following (1,2,3,4 = memory types)...
++
++ Sample memory map (w/overlaps):
++ ____22__________________
++ ______________________4_
++ ____1111________________
++ _44_____________________
++ 11111111________________
++ ____________________33__
++ ___________44___________
++ __________33333_________
++ ______________22________
++ ___________________2222_
++ _________111111111______
++ _____________________11_
++ _________________4______
++
++ Sanitized equivalent (no overlap):
++ 1_______________________
++ _44_____________________
++ ___1____________________
++ ____22__________________
++ ______11________________
++ _________1______________
++ __________3_____________
++ ___________44___________
++ _____________33_________
++ _______________2________
++ ________________1_______
++ _________________4______
++ ___________________2____
++ ____________________33__
++ ______________________4_
++ */
++
++ /* if there's only one memory region, don't bother */
++ if (*pnr_map < 2)
++ return -1;
++
++ old_nr = *pnr_map;
++
++ /* bail out if we find any unreasonable addresses in bios map */
++ for (i=0; i<old_nr; i++)
++ if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
++ return -1;
++
++ /* create pointers for initial change-point information (for sorting) */
++ for (i=0; i < 2*old_nr; i++)
++ change_point[i] = &change_point_list[i];
++
++ /* record all known change-points (starting and ending addresses),
++ omitting those that are for empty memory regions */
++ chgidx = 0;
++ for (i=0; i < old_nr; i++) {
++ if (biosmap[i].size != 0) {
++ change_point[chgidx]->addr = biosmap[i].addr;
++ change_point[chgidx++]->pbios = &biosmap[i];
++ change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
++ change_point[chgidx++]->pbios = &biosmap[i];
++ }
++ }
++ chg_nr = chgidx; /* true number of change-points */
++
++ /* sort change-point list by memory addresses (low -> high) */
++ still_changing = 1;
++ while (still_changing) {
++ still_changing = 0;
++ for (i=1; i < chg_nr; i++) {
++ /* if <current_addr> > <last_addr>, swap */
++ /* or, if current=<start_addr> & last=<end_addr>, swap */
++ if ((change_point[i]->addr < change_point[i-1]->addr) ||
++ ((change_point[i]->addr == change_point[i-1]->addr) &&
++ (change_point[i]->addr == change_point[i]->pbios->addr) &&
++ (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
++ )
++ {
++ change_tmp = change_point[i];
++ change_point[i] = change_point[i-1];
++ change_point[i-1] = change_tmp;
++ still_changing=1;
++ }
++ }
++ }
++
++ /* create a new bios memory map, removing overlaps */
++ overlap_entries=0; /* number of entries in the overlap table */
++ new_bios_entry=0; /* index for creating new bios map entries */
++ last_type = 0; /* start with undefined memory type */
++ last_addr = 0; /* start with 0 as last starting address */
++	/* loop through change-points, determining effect on the new bios map */
++ for (chgidx=0; chgidx < chg_nr; chgidx++)
++ {
++ /* keep track of all overlapping bios entries */
++ if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
++ {
++ /* add map entry to overlap list (> 1 entry implies an overlap) */
++ overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
++ }
++ else
++ {
++ /* remove entry from list (order independent, so swap with last) */
++ for (i=0; i<overlap_entries; i++)
++ {
++ if (overlap_list[i] == change_point[chgidx]->pbios)
++ overlap_list[i] = overlap_list[overlap_entries-1];
++ }
++ overlap_entries--;
++ }
++ /* if there are overlapping entries, decide which "type" to use */
++ /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
++ current_type = 0;
++ for (i=0; i<overlap_entries; i++)
++ if (overlap_list[i]->type > current_type)
++ current_type = overlap_list[i]->type;
++ /* continue building up new bios map based on this information */
++ if (current_type != last_type) {
++ if (last_type != 0) {
++ new_bios[new_bios_entry].size =
++ change_point[chgidx]->addr - last_addr;
++ /* move forward only if the new size was non-zero */
++ if (new_bios[new_bios_entry].size != 0)
++ if (++new_bios_entry >= E820MAX)
++ break; /* no more space left for new bios entries */
++ }
++ if (current_type != 0) {
++ new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
++ new_bios[new_bios_entry].type = current_type;
++ last_addr=change_point[chgidx]->addr;
++ }
++ last_type = current_type;
++ }
++ }
++ new_nr = new_bios_entry; /* retain count for new bios entries */
++
++ /* copy new bios mapping into original location */
++ memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
++ *pnr_map = new_nr;
++
++ return 0;
++}
++
++/*
++ * Copy the BIOS e820 map into a safe place.
++ *
++ * Sanity-check it while we're at it..
++ *
++ * If we're lucky and live on a modern system, the setup code
++ * will have given us a memory map that we can use to properly
++ * set up memory. If we aren't, we'll fake a memory map.
++ *
++ * We check to see that the memory map contains at least 2 elements
++ * before we'll use it, because the detection code in setup.S may
++ * not be perfect, and almost every PC known to man has two memory
++ * regions: one from 0 to 640k, and one from 1mb up. (The IBM
++ * thinkpad 560x, for example, does not cooperate with the memory
++ * detection code.)
++ */
++int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
++{
++#ifndef CONFIG_XEN
++ /* Only one memory region (or negative)? Ignore it */
++ if (nr_map < 2)
++ return -1;
++#else
++ BUG_ON(nr_map < 1);
++#endif
++
++ do {
++ unsigned long long start = biosmap->addr;
++ unsigned long long size = biosmap->size;
++ unsigned long long end = start + size;
++ unsigned long type = biosmap->type;
++
++ /* Overflow in 64 bits? Ignore the memory map. */
++ if (start > end)
++ return -1;
++
++#ifndef CONFIG_XEN
++ /*
++ * Some BIOSes claim RAM in the 640k - 1M region.
++ * Not right. Fix it up.
++ */
++ if (type == E820_RAM) {
++ if (start < 0x100000ULL && end > 0xA0000ULL) {
++ if (start < 0xA0000ULL)
++ add_memory_region(start, 0xA0000ULL-start, type);
++ if (end <= 0x100000ULL)
++ continue;
++ start = 0x100000ULL;
++ size = end - start;
++ }
++ }
++#endif
++ add_memory_region(start, size, type);
++ } while (biosmap++,--nr_map);
++ return 0;
++}
++
++#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
++struct edd edd;
++#ifdef CONFIG_EDD_MODULE
++EXPORT_SYMBOL(edd);
++#endif
++#ifndef CONFIG_XEN
++/**
++ * copy_edd() - Copy the BIOS EDD information
++ * from boot_params into a safe place.
++ *
++ */
++static inline void copy_edd(void)
++{
++ memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
++ memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
++ edd.mbr_signature_nr = EDD_MBR_SIG_NR;
++ edd.edd_info_nr = EDD_NR;
++}
++#endif
++#else
++static inline void copy_edd(void)
++{
++}
++#endif
++
++static void __init parse_cmdline_early (char ** cmdline_p)
++{
++ char c = ' ', *to = command_line, *from = saved_command_line;
++ int len = 0, max_cmdline;
++ int userdef = 0;
++
++ if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
++ max_cmdline = COMMAND_LINE_SIZE;
++ memcpy(saved_command_line, xen_start_info->cmd_line, max_cmdline);
++ /* Save unparsed command line copy for /proc/cmdline */
++ saved_command_line[max_cmdline-1] = '\0';
++
++ for (;;) {
++ if (c != ' ')
++ goto next_char;
++ /*
++ * "mem=nopentium" disables the 4MB page tables.
++ * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
++ * to <mem>, overriding the bios size.
++ * "memmap=XXX[KkmM]@XXX[KkmM]" defines a memory region from
++ * <start> to <start>+<mem>, overriding the bios size.
++ *
++ * HPA tells me bootloaders need to parse mem=, so no new
++ * option should be mem= [also see Documentation/i386/boot.txt]
++ */
++ if (!memcmp(from, "mem=", 4)) {
++ if (to != command_line)
++ to--;
++ if (!memcmp(from+4, "nopentium", 9)) {
++ from += 9+4;
++ clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
++ disable_pse = 1;
++ } else {
++ /* If the user specifies memory size, we
++ * limit the BIOS-provided memory map to
++ * that size. exactmap can be used to specify
++ * the exact map. mem=number can be used to
++ * trim the existing memory map.
++ */
++ unsigned long long mem_size;
++
++ mem_size = memparse(from+4, &from);
++ limit_regions(mem_size);
++ userdef=1;
++ }
++ }
++
++ else if (!memcmp(from, "memmap=", 7)) {
++ if (to != command_line)
++ to--;
++ if (!memcmp(from+7, "exactmap", 8)) {
++#ifdef CONFIG_CRASH_DUMP
++ /* If we are doing a crash dump, we
++ * still need to know the real mem
++ * size before original memory map is
++ * reset.
++ */
++ find_max_pfn();
++ saved_max_pfn = max_pfn;
++#endif
++ from += 8+7;
++ e820.nr_map = 0;
++ userdef = 1;
++ } else {
++ /* If the user specifies memory size, we
++ * limit the BIOS-provided memory map to
++ * that size. exactmap can be used to specify
++ * the exact map. mem=number can be used to
++ * trim the existing memory map.
++ */
++ unsigned long long start_at, mem_size;
++
++ mem_size = memparse(from+7, &from);
++ if (*from == '@') {
++ start_at = memparse(from+1, &from);
++ add_memory_region(start_at, mem_size, E820_RAM);
++ } else if (*from == '#') {
++ start_at = memparse(from+1, &from);
++ add_memory_region(start_at, mem_size, E820_ACPI);
++ } else if (*from == '$') {
++ start_at = memparse(from+1, &from);
++ add_memory_region(start_at, mem_size, E820_RESERVED);
++ } else {
++ limit_regions(mem_size);
++ userdef=1;
++ }
++ }
++ }
++
++ else if (!memcmp(from, "noexec=", 7))
++ noexec_setup(from + 7);
++
++
++#ifdef CONFIG_X86_MPPARSE
++ /*
++ * If the BIOS enumerates physical processors before logical,
++ * maxcpus=N at enumeration-time can be used to disable HT.
++ */
++ else if (!memcmp(from, "maxcpus=", 8)) {
++ extern unsigned int maxcpus;
++
++ maxcpus = simple_strtoul(from + 8, NULL, 0);
++ }
++#endif
++
++#ifdef CONFIG_ACPI
++ /* "acpi=off" disables both ACPI table parsing and interpreter */
++ else if (!memcmp(from, "acpi=off", 8)) {
++ disable_acpi();
++ }
++
++ /* acpi=force to over-ride black-list */
++ else if (!memcmp(from, "acpi=force", 10)) {
++ acpi_force = 1;
++ acpi_ht = 1;
++ acpi_disabled = 0;
++ }
++
++ /* acpi=strict disables out-of-spec workarounds */
++ else if (!memcmp(from, "acpi=strict", 11)) {
++ acpi_strict = 1;
++ }
++
++ /* Limit ACPI just to boot-time to enable HT */
++ else if (!memcmp(from, "acpi=ht", 7)) {
++ if (!acpi_force)
++ disable_acpi();
++ acpi_ht = 1;
++ }
++
++ /* "pci=noacpi" disable ACPI IRQ routing and PCI scan */
++ else if (!memcmp(from, "pci=noacpi", 10)) {
++ acpi_disable_pci();
++ }
++ /* "acpi=noirq" disables ACPI interrupt routing */
++ else if (!memcmp(from, "acpi=noirq", 10)) {
++ acpi_noirq_set();
++ }
++
++ else if (!memcmp(from, "acpi_sci=edge", 13))
++ acpi_sci_flags.trigger = 1;
++
++ else if (!memcmp(from, "acpi_sci=level", 14))
++ acpi_sci_flags.trigger = 3;
++
++ else if (!memcmp(from, "acpi_sci=high", 13))
++ acpi_sci_flags.polarity = 1;
++
++ else if (!memcmp(from, "acpi_sci=low", 12))
++ acpi_sci_flags.polarity = 3;
++
++#ifdef CONFIG_X86_IO_APIC
++ else if (!memcmp(from, "acpi_skip_timer_override", 24))
++ acpi_skip_timer_override = 1;
++
++ if (!memcmp(from, "disable_timer_pin_1", 19))
++ disable_timer_pin_1 = 1;
++ if (!memcmp(from, "enable_timer_pin_1", 18))
++ disable_timer_pin_1 = -1;
++
++ /* disable IO-APIC */
++ else if (!memcmp(from, "noapic", 6))
++ disable_ioapic_setup();
++#endif /* CONFIG_X86_IO_APIC */
++#endif /* CONFIG_ACPI */
++
++#ifdef CONFIG_X86_LOCAL_APIC
++ /* enable local APIC */
++ else if (!memcmp(from, "lapic", 5))
++ lapic_enable();
++
++ /* disable local APIC */
++ else if (!memcmp(from, "nolapic", 6))
++ lapic_disable();
++#endif /* CONFIG_X86_LOCAL_APIC */
++
++#ifdef CONFIG_KEXEC
++ /* crashkernel=size@addr specifies the location to reserve for
++ * a crash kernel. By reserving this memory we guarantee
++	 * that Linux never sets it up as a DMA target.
++ * Useful for holding code to do something appropriate
++ * after a kernel panic.
++ */
++ else if (!memcmp(from, "crashkernel=", 12)) {
++#ifndef CONFIG_XEN
++ unsigned long size, base;
++ size = memparse(from+12, &from);
++ if (*from == '@') {
++ base = memparse(from+1, &from);
++ /* FIXME: Do I want a sanity check
++ * to validate the memory range?
++ */
++ crashk_res.start = base;
++ crashk_res.end = base + size - 1;
++ }
++#else
++ printk("Ignoring crashkernel command line, "
++ "parameter will be supplied by xen\n");
++#endif
++ }
++#endif
++#ifdef CONFIG_PROC_VMCORE
++ /* elfcorehdr= specifies the location of elf core header
++ * stored by the crashed kernel.
++ */
++ else if (!memcmp(from, "elfcorehdr=", 11))
++ elfcorehdr_addr = memparse(from+11, &from);
++#endif
++
++ /*
++ * highmem=size forces highmem to be exactly 'size' bytes.
++ * This works even on boxes that have no highmem otherwise.
++ * This also works to reduce highmem size on bigger boxes.
++ */
++ else if (!memcmp(from, "highmem=", 8))
++ highmem_pages = memparse(from+8, &from) >> PAGE_SHIFT;
++
++ /*
++ * vmalloc=size forces the vmalloc area to be exactly 'size'
++ * bytes. This can be used to increase (or decrease) the
++ * vmalloc area - the default is 128m.
++ */
++ else if (!memcmp(from, "vmalloc=", 8))
++ __VMALLOC_RESERVE = memparse(from+8, &from);
++
++ next_char:
++ c = *(from++);
++ if (!c)
++ break;
++ if (COMMAND_LINE_SIZE <= ++len)
++ break;
++ *(to++) = c;
++ }
++ *to = '\0';
++ *cmdline_p = command_line;
++ if (userdef) {
++ printk(KERN_INFO "user-defined physical RAM map:\n");
++ print_memory_map("user");
++ }
++}
++
++/*
++ * Callback for efi_memory_walk.
++ */
++static int __init
++efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
++{
++ unsigned long *max_pfn = arg, pfn;
++
++ if (start < end) {
++ pfn = PFN_UP(end -1);
++ if (pfn > *max_pfn)
++ *max_pfn = pfn;
++ }
++ return 0;
++}
++
++static int __init
++efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
++{
++ memory_present(0, start, end);
++ return 0;
++}
++
++/*
++ * This function checks if any part of the range <start,end> is mapped
++ * with type.
++ */
++int
++e820_any_mapped(u64 start, u64 end, unsigned type)
++{
++ int i;
++
++#ifndef CONFIG_XEN
++ for (i = 0; i < e820.nr_map; i++) {
++ const struct e820entry *ei = &e820.map[i];
++#else
++ if (!is_initial_xendomain())
++ return 0;
++ for (i = 0; i < machine_e820.nr_map; ++i) {
++ const struct e820entry *ei = &machine_e820.map[i];
++#endif
++
++ if (type && ei->type != type)
++ continue;
++ if (ei->addr >= end || ei->addr + ei->size <= start)
++ continue;
++ return 1;
++ }
++ return 0;
++}
++EXPORT_SYMBOL_GPL(e820_any_mapped);
++
++/*
++ * This function checks if the entire range <start,end> is mapped with type.
++ *
++ * Note: this function only works correctly if the e820 table is sorted and
++ * non-overlapping, which is the case.
++ */
++int __init
++e820_all_mapped(unsigned long s, unsigned long e, unsigned type)
++{
++ u64 start = s;
++ u64 end = e;
++ int i;
++
++#ifndef CONFIG_XEN
++ for (i = 0; i < e820.nr_map; i++) {
++ struct e820entry *ei = &e820.map[i];
++#else
++ if (!is_initial_xendomain())
++ return 0;
++ for (i = 0; i < machine_e820.nr_map; ++i) {
++ const struct e820entry *ei = &machine_e820.map[i];
++#endif
++ if (type && ei->type != type)
++ continue;
++		/* does this e820 region overlap (at least partly) the range being checked? */
++ if (ei->addr >= end || ei->addr + ei->size <= start)
++ continue;
++		/* if the region covers the beginning of <start,end>, advance start
++		 * to the end of the region, since that part is already mapped
++		 */
++ if (ei->addr <= start)
++ start = ei->addr + ei->size;
++ /* if start is now at or beyond end, we're done, full
++ * coverage */
++ if (start >= end)
++ return 1; /* we're done */
++ }
++ return 0;
++}
++
++/*
++ * Find the highest page frame number we have available
++ */
++void __init find_max_pfn(void)
++{
++ int i;
++
++ max_pfn = 0;
++ if (efi_enabled) {
++ efi_memmap_walk(efi_find_max_pfn, &max_pfn);
++ efi_memmap_walk(efi_memory_present_wrapper, NULL);
++ return;
++ }
++
++ for (i = 0; i < e820.nr_map; i++) {
++ unsigned long start, end;
++ /* RAM? */
++ if (e820.map[i].type != E820_RAM)
++ continue;
++ start = PFN_UP(e820.map[i].addr);
++ end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
++ if (start >= end)
++ continue;
++ if (end > max_pfn)
++ max_pfn = end;
++ memory_present(0, start, end);
++ }
++}
++
++/*
++ * Determine low and high memory ranges:
++ */
++unsigned long __init find_max_low_pfn(void)
++{
++ unsigned long max_low_pfn;
++
++ max_low_pfn = max_pfn;
++ if (max_low_pfn > MAXMEM_PFN) {
++ if (highmem_pages == -1)
++ highmem_pages = max_pfn - MAXMEM_PFN;
++ if (highmem_pages + MAXMEM_PFN < max_pfn)
++ max_pfn = MAXMEM_PFN + highmem_pages;
++ if (highmem_pages + MAXMEM_PFN > max_pfn) {
++ printk("only %luMB highmem pages available, ignoring highmem size of %uMB.\n", pages_to_mb(max_pfn - MAXMEM_PFN), pages_to_mb(highmem_pages));
++ highmem_pages = 0;
++ }
++ max_low_pfn = MAXMEM_PFN;
++#ifndef CONFIG_HIGHMEM
++ /* Maximum memory usable is what is directly addressable */
++ printk(KERN_WARNING "Warning only %ldMB will be used.\n",
++ MAXMEM>>20);
++ if (max_pfn > MAX_NONPAE_PFN)
++ printk(KERN_WARNING "Use a PAE enabled kernel.\n");
++ else
++ printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
++ max_pfn = MAXMEM_PFN;
++#else /* !CONFIG_HIGHMEM */
++#ifndef CONFIG_X86_PAE
++ if (max_pfn > MAX_NONPAE_PFN) {
++ max_pfn = MAX_NONPAE_PFN;
++ printk(KERN_WARNING "Warning only 4GB will be used.\n");
++ printk(KERN_WARNING "Use a PAE enabled kernel.\n");
++ }
++#endif /* !CONFIG_X86_PAE */
++#endif /* !CONFIG_HIGHMEM */
++ } else {
++ if (highmem_pages == -1)
++ highmem_pages = 0;
++#ifdef CONFIG_HIGHMEM
++ if (highmem_pages >= max_pfn) {
++ printk(KERN_ERR "highmem size specified (%uMB) is bigger than pages available (%luMB)!.\n", pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
++ highmem_pages = 0;
++ }
++ if (highmem_pages) {
++ if (max_low_pfn-highmem_pages < 64*1024*1024/PAGE_SIZE){
++ printk(KERN_ERR "highmem size %uMB results in smaller than 64MB lowmem, ignoring it.\n", pages_to_mb(highmem_pages));
++ highmem_pages = 0;
++ }
++ max_low_pfn -= highmem_pages;
++ }
++#else
++ if (highmem_pages)
++ printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
++#endif
++ }
++ return max_low_pfn;
++}
++
++/*
++ * Free all available memory for boot time allocation. Used
++ * as a callback function by efi_memory_walk()
++ */
++
++static int __init
++free_available_memory(unsigned long start, unsigned long end, void *arg)
++{
++ /* check max_low_pfn */
++ if (start >= (max_low_pfn << PAGE_SHIFT))
++ return 0;
++ if (end >= (max_low_pfn << PAGE_SHIFT))
++ end = max_low_pfn << PAGE_SHIFT;
++ if (start < end)
++ free_bootmem(start, end - start);
++
++ return 0;
++}
++/*
++ * Register fully available low RAM pages with the bootmem allocator.
++ */
++static void __init register_bootmem_low_pages(unsigned long max_low_pfn)
++{
++ int i;
++
++ if (efi_enabled) {
++ efi_memmap_walk(free_available_memory, NULL);
++ return;
++ }
++ for (i = 0; i < e820.nr_map; i++) {
++ unsigned long curr_pfn, last_pfn, size;
++ /*
++ * Reserve usable low memory
++ */
++ if (e820.map[i].type != E820_RAM)
++ continue;
++ /*
++ * We are rounding up the start address of usable memory:
++ */
++ curr_pfn = PFN_UP(e820.map[i].addr);
++ if (curr_pfn >= max_low_pfn)
++ continue;
++ /*
++ * ... and at the end of the usable range downwards:
++ */
++ last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
++
++#ifdef CONFIG_XEN
++ /*
++ * Truncate to the number of actual pages currently
++ * present.
++ */
++ if (last_pfn > xen_start_info->nr_pages)
++ last_pfn = xen_start_info->nr_pages;
++#endif
++
++ if (last_pfn > max_low_pfn)
++ last_pfn = max_low_pfn;
++
++ /*
++ * .. finally, did all the rounding and playing
++ * around just make the area go away?
++ */
++ if (last_pfn <= curr_pfn)
++ continue;
++
++ size = last_pfn - curr_pfn;
++ free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
++ }
++}
++
++#ifndef CONFIG_XEN
++/*
++ * workaround for Dell systems that neglect to reserve EBDA
++ */
++static void __init reserve_ebda_region(void)
++{
++ unsigned int addr;
++ addr = get_bios_ebda();
++ if (addr)
++ reserve_bootmem(addr, PAGE_SIZE);
++}
++#endif
++
++#ifndef CONFIG_NEED_MULTIPLE_NODES
++void __init setup_bootmem_allocator(void);
++static unsigned long __init setup_memory(void)
++{
++ /*
++ * partially used pages are not usable - thus
++ * we are rounding upwards:
++ */
++ min_low_pfn = PFN_UP(__pa(xen_start_info->pt_base)) +
++ xen_start_info->nr_pt_frames;
++
++ find_max_pfn();
++
++ max_low_pfn = find_max_low_pfn();
++
++#ifdef CONFIG_HIGHMEM
++ highstart_pfn = highend_pfn = max_pfn;
++ if (max_pfn > max_low_pfn) {
++ highstart_pfn = max_low_pfn;
++ }
++ printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
++ pages_to_mb(highend_pfn - highstart_pfn));
++#endif
++ printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
++ pages_to_mb(max_low_pfn));
++
++ setup_bootmem_allocator();
++
++ return max_low_pfn;
++}
++
++void __init zone_sizes_init(void)
++{
++ unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
++ unsigned int max_dma, low;
++
++ max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
++ low = max_low_pfn;
++
++ if (low < max_dma)
++ zones_size[ZONE_DMA] = low;
++ else {
++ zones_size[ZONE_DMA] = max_dma;
++ zones_size[ZONE_NORMAL] = low - max_dma;
++#ifdef CONFIG_HIGHMEM
++ zones_size[ZONE_HIGHMEM] = highend_pfn - low;
++#endif
++ }
++ free_area_init(zones_size);
++}
++#else
++extern unsigned long __init setup_memory(void);
++extern void zone_sizes_init(void);
++#endif /* !CONFIG_NEED_MULTIPLE_NODES */
++
++void __init setup_bootmem_allocator(void)
++{
++ unsigned long bootmap_size;
++ /*
++ * Initialize the boot-time allocator (with low memory only):
++ */
++ bootmap_size = init_bootmem(min_low_pfn, max_low_pfn);
++
++ register_bootmem_low_pages(max_low_pfn);
++
++ /*
++ * Reserve the bootmem bitmap itself as well. We do this in two
++ * steps (first step was init_bootmem()) because this catches
++ * the (very unlikely) case of us accidentally initializing the
++ * bootmem allocator with an invalid RAM area.
++ */
++ reserve_bootmem(__PHYSICAL_START, (PFN_PHYS(min_low_pfn) +
++ bootmap_size + PAGE_SIZE-1) - (__PHYSICAL_START));
++
++#ifndef CONFIG_XEN
++ /*
++ * reserve physical page 0 - it's a special BIOS page on many boxes,
++ * enabling clean reboots, SMP operation, laptop functions.
++ */
++ reserve_bootmem(0, PAGE_SIZE);
++
++ /* reserve EBDA region, it's a 4K region */
++ reserve_ebda_region();
++
++	/* Could be an AMD 768MPX chipset. Reserve a page before VGA to prevent
++	   PCI prefetch into it (erratum #56). Usually the page is reserved anyway,
++	   unless you have no PS/2 mouse plugged in. */
++ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
++ boot_cpu_data.x86 == 6)
++ reserve_bootmem(0xa0000 - 4096, 4096);
++
++#ifdef CONFIG_SMP
++ /*
++ * But first pinch a few for the stack/trampoline stuff
++ * FIXME: Don't need the extra page at 4K, but need to fix
++ * trampoline before removing it. (see the GDT stuff)
++ */
++ reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
++#endif
++#ifdef CONFIG_ACPI_SLEEP
++ /*
++ * Reserve low memory region for sleep support.
++ */
++ acpi_reserve_bootmem();
++#endif
++#endif /* !CONFIG_XEN */
++
++#ifdef CONFIG_BLK_DEV_INITRD
++ if (xen_start_info->mod_start) {
++ if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
++ /*reserve_bootmem(INITRD_START, INITRD_SIZE);*/
++ initrd_start = INITRD_START + PAGE_OFFSET;
++ initrd_end = initrd_start+INITRD_SIZE;
++ initrd_below_start_ok = 1;
++ }
++ else {
++ printk(KERN_ERR "initrd extends beyond end of memory "
++ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
++ INITRD_START + INITRD_SIZE,
++ max_low_pfn << PAGE_SHIFT);
++ initrd_start = 0;
++ }
++ }
++#endif
++#ifdef CONFIG_KEXEC
++#ifdef CONFIG_XEN
++ xen_machine_kexec_setup_resources();
++#else
++ if (crashk_res.start != crashk_res.end)
++ reserve_bootmem(crashk_res.start,
++ crashk_res.end - crashk_res.start + 1);
++#endif
++#endif
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap))
++ phys_to_machine_mapping =
++ (unsigned long *)xen_start_info->mfn_list;
++}
++
++/*
++ * The node 0 pgdat is initialized before all of these because
++ * it's needed for bootmem. node>0 pgdats have their virtual
++ * space allocated before the pagetables are in place to access
++ * them, so they can't be cleared then.
++ *
++ * This should all compile down to nothing when NUMA is off.
++ */
++void __init remapped_pgdat_init(void)
++{
++ int nid;
++
++ for_each_online_node(nid) {
++ if (nid != 0)
++ memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
++ }
++}
++
++/*
++ * Request address space for all standard RAM and ROM resources
++ * and also for regions reported as reserved by the e820.
++ */
++static void __init
++legacy_init_iomem_resources(struct e820entry *e820, int nr_map,
++ struct resource *code_resource,
++ struct resource *data_resource)
++{
++ int i;
++
++ probe_roms();
++
++ for (i = 0; i < nr_map; i++) {
++ struct resource *res;
++#ifndef CONFIG_RESOURCES_64BIT
++ if (e820[i].addr + e820[i].size > 0x100000000ULL)
++ continue;
++#endif
++ res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
++ switch (e820[i].type) {
++ case E820_RAM: res->name = "System RAM"; break;
++ case E820_ACPI: res->name = "ACPI Tables"; break;
++ case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
++ default: res->name = "reserved";
++ }
++ res->start = e820[i].addr;
++ res->end = res->start + e820[i].size - 1;
++ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
++ if (request_resource(&iomem_resource, res)) {
++ kfree(res);
++ continue;
++ }
++ if (e820[i].type == E820_RAM) {
++ /*
++ * We don't know which RAM region contains kernel data,
++ * so we try it repeatedly and let the resource manager
++ * test it.
++ */
++#ifndef CONFIG_XEN
++ request_resource(res, code_resource);
++ request_resource(res, data_resource);
++#endif
++#ifdef CONFIG_KEXEC
++ if (crashk_res.start != crashk_res.end)
++ request_resource(res, &crashk_res);
++#ifdef CONFIG_XEN
++ xen_machine_kexec_register_resources(res);
++#endif
++#endif
++ }
++ }
++}
++
++/*
++ * Locate an unused range of the physical address space below 4G which
++ * can be used for PCI mappings.
++ */
++static void __init
++e820_setup_gap(struct e820entry *e820, int nr_map)
++{
++ unsigned long gapstart, gapsize, round;
++ unsigned long long last;
++ int i;
++
++ /*
++	 * Search for the biggest gap in the low 32 bits of the e820
++ * memory space.
++ */
++ last = 0x100000000ull;
++ gapstart = 0x10000000;
++ gapsize = 0x400000;
++ i = nr_map;
++ while (--i >= 0) {
++ unsigned long long start = e820[i].addr;
++ unsigned long long end = start + e820[i].size;
++
++ /*
++ * Since "last" is at most 4GB, we know we'll
++ * fit in 32 bits if this condition is true
++ */
++ if (last > end) {
++ unsigned long gap = last - end;
++
++ if (gap > gapsize) {
++ gapsize = gap;
++ gapstart = end;
++ }
++ }
++ if (start < last)
++ last = start;
++ }
++
++ /*
++ * See how much we want to round up: start off with
++ * rounding to the next 1MB area.
++ */
++ round = 0x100000;
++ while ((gapsize >> 4) > round)
++ round += round;
++ /* Fun with two's complement */
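++	/* 'round' is a power of two, so -round is a mask with its low bits clear:
++	 * (gapstart + round) & -round rounds gapstart up to the next 'round'
++	 * boundary above it. */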
++ pci_mem_start = (gapstart + round) & -round;
++
++ printk("Allocating PCI resources starting at %08lx (gap: %08lx:%08lx)\n",
++ pci_mem_start, gapstart, gapsize);
++}
++
++/*
++ * Request address space for all standard resources
++ *
++ * This is called just before pcibios_init(), which is also a
++ * subsys_initcall, but is linked in later (in arch/i386/pci/common.c).
++ */
++static int __init request_standard_resources(void)
++{
++ int i;
++
++ /* Nothing to do if not running in dom0. */
++ if (!is_initial_xendomain())
++ return 0;
++
++ printk("Setting up standard PCI resources\n");
++#ifdef CONFIG_XEN
++ legacy_init_iomem_resources(machine_e820.map, machine_e820.nr_map,
++ &code_resource, &data_resource);
++#else
++ if (efi_enabled)
++ efi_initialize_iomem_resources(&code_resource, &data_resource);
++ else
++ legacy_init_iomem_resources(e820.map, e820.nr_map,
++ &code_resource, &data_resource);
++#endif
++
++ /* EFI systems may still have VGA */
++ request_resource(&iomem_resource, &video_ram_resource);
++
++ /* request I/O space for devices used on all i[345]86 PCs */
++ for (i = 0; i < STANDARD_IO_RESOURCES; i++)
++ request_resource(&ioport_resource, &standard_io_resources[i]);
++ return 0;
++}
++
++subsys_initcall(request_standard_resources);
++
++static void __init register_memory(void)
++{
++#ifdef CONFIG_XEN
++ if (is_initial_xendomain()) {
++ struct xen_memory_map memmap;
++
++ memmap.nr_entries = E820MAX;
++ set_xen_guest_handle(memmap.buffer, machine_e820.map);
++
++ if (HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap))
++ BUG();
++
++ machine_e820.nr_map = memmap.nr_entries;
++ e820_setup_gap(machine_e820.map, machine_e820.nr_map);
++ }
++ else
++#endif
++ e820_setup_gap(e820.map, e820.nr_map);
++}
++
++#ifdef CONFIG_MCA
++static void set_mca_bus(int x)
++{
++ MCA_bus = x;
++}
++#else
++static void set_mca_bus(int x) { }
++#endif
++
++/*
++ * Determine if we were loaded by an EFI loader. If so, then we have also been
++ * passed the efi memmap, systab, etc., so we should use these data structures
++ * for initialization. Note, the efi init code path is determined by the
++ * global efi_enabled. This allows the same kernel image to be used on existing
++ * systems (with a traditional BIOS) as well as on EFI systems.
++ */
++void __init setup_arch(char **cmdline_p)
++{
++ int i, j, k, fpp;
++ struct physdev_set_iopl set_iopl;
++ unsigned long max_low_pfn;
++ unsigned long p2m_pages;
++
++ /* Force a quick death if the kernel panics (not domain 0). */
++ extern int panic_timeout;
++ if (!panic_timeout && !is_initial_xendomain())
++ panic_timeout = 1;
++
++ /* Register a call for panic conditions. */
++ atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
++
++ WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
++ VMASST_TYPE_4gb_segments));
++ WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
++ VMASST_TYPE_writable_pagetables));
++
++ memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
++ early_cpu_init();
++#ifdef CONFIG_SMP
++ prefill_possible_map();
++#endif
++
++ /*
++ * FIXME: This isn't an official loader_type right
++ * now but does currently work with elilo.
++ * If we were configured as an EFI kernel, check to make
++ * sure that we were loaded correctly from elilo and that
++ * the system table is valid. If not, then initialize normally.
++ */
++#ifdef CONFIG_EFI
++ if ((LOADER_TYPE == 0x50) && EFI_SYSTAB)
++ efi_enabled = 1;
++#endif
++
++ /* This must be initialized to UNNAMED_MAJOR for ipconfig to work
++ properly. Setting ROOT_DEV to default to /dev/ram0 breaks initrd.
++ */
++ ROOT_DEV = MKDEV(UNNAMED_MAJOR,0);
++ drive_info = DRIVE_INFO;
++ screen_info = SCREEN_INFO;
++ copy_edid();
++ apm_info.bios = APM_BIOS_INFO;
++ ist_info = IST_INFO;
++ saved_videomode = VIDEO_MODE;
++ if( SYS_DESC_TABLE.length != 0 ) {
++ set_mca_bus(SYS_DESC_TABLE.table[3] & 0x2);
++ machine_id = SYS_DESC_TABLE.table[0];
++ machine_submodel_id = SYS_DESC_TABLE.table[1];
++ BIOS_revision = SYS_DESC_TABLE.table[2];
++ }
++ bootloader_type = LOADER_TYPE;
++
++ if (is_initial_xendomain()) {
++ const struct dom0_vga_console_info *info =
++ (void *)((char *)xen_start_info +
++ xen_start_info->console.dom0.info_off);
++
++ dom0_init_screen_info(info,
++ xen_start_info->console.dom0.info_size);
++ xen_start_info->console.domU.mfn = 0;
++ xen_start_info->console.domU.evtchn = 0;
++ } else
++ screen_info.orig_video_isVGA = 0;
++
++#ifdef CONFIG_BLK_DEV_RAM
++ rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
++ rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
++ rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
++#endif
++
++ setup_xen_features();
++
++ ARCH_SETUP
++ if (efi_enabled)
++ efi_init();
++ else {
++ printk(KERN_INFO "BIOS-provided physical RAM map:\n");
++ print_memory_map(machine_specific_memory_setup());
++ }
++
++ copy_edd();
++
++ if (!MOUNT_ROOT_RDONLY)
++ root_mountflags &= ~MS_RDONLY;
++ init_mm.start_code = (unsigned long) _text;
++ init_mm.end_code = (unsigned long) _etext;
++ init_mm.end_data = (unsigned long) _edata;
++ init_mm.brk = (PFN_UP(__pa(xen_start_info->pt_base)) +
++ xen_start_info->nr_pt_frames) << PAGE_SHIFT;
++
++ code_resource.start = virt_to_phys(_text);
++ code_resource.end = virt_to_phys(_etext)-1;
++ data_resource.start = virt_to_phys(_etext);
++ data_resource.end = virt_to_phys(_edata)-1;
++
++ parse_cmdline_early(cmdline_p);
++
++#ifdef CONFIG_EARLY_PRINTK
++ {
++ char *s = strstr(*cmdline_p, "earlyprintk=");
++ if (s) {
++ setup_early_printk(strchr(s, '=') + 1);
++ printk("early console enabled\n");
++ }
++ }
++#endif
++
++ max_low_pfn = setup_memory();
++
++ /*
++ * NOTE: before this point _nobody_ is allowed to allocate
++ * any memory using the bootmem allocator. Although the
++	 * allocator is now initialised, only the first 8MB of the kernel
++	 * virtual address space has been mapped. All allocations made before
++	 * paging_init() has completed must use the alloc_bootmem_low_pages()
++	 * variant (which allocates DMA'able memory), and care must be taken
++	 * not to exceed the 8MB limit.
++ */
++
++#ifdef CONFIG_SMP
++ smp_alloc_memory(); /* AP processor realmode stacks in low memory*/
++#endif
++ paging_init();
++ remapped_pgdat_init();
++ sparse_init();
++ zone_sizes_init();
++
++#ifdef CONFIG_X86_FIND_SMP_CONFIG
++ /*
++ * Find and reserve possible boot-time SMP configuration:
++ */
++ find_smp_config();
++#endif
++
++ p2m_pages = max_pfn;
++ if (xen_start_info->nr_pages > max_pfn) {
++ /*
++		 * max_pfn was shrunk (probably by the mem= or highmem=
++		 * kernel parameter); shrink our reservation with the hypervisor to match
++ */
++ struct xen_memory_reservation reservation = {
++ .address_bits = 0,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ };
++ unsigned int difference;
++ int ret;
++
++ difference = xen_start_info->nr_pages - max_pfn;
++
++ set_xen_guest_handle(reservation.extent_start,
++ ((unsigned long *)xen_start_info->mfn_list) + max_pfn);
++ reservation.nr_extents = difference;
++ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &reservation);
++ BUG_ON (ret != difference);
++ }
++ else if (max_pfn > xen_start_info->nr_pages)
++ p2m_pages = xen_start_info->nr_pages;
++
++ /* Make sure we have a correctly sized P->M table. */
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ phys_to_machine_mapping = alloc_bootmem_low_pages(
++ max_pfn * sizeof(unsigned long));
++ memset(phys_to_machine_mapping, ~0,
++ max_pfn * sizeof(unsigned long));
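++		/* Initialise every entry to ~0 (invalid) before copying in the
++		 * known mappings for the first p2m_pages frames. */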
++ memcpy(phys_to_machine_mapping,
++ (unsigned long *)xen_start_info->mfn_list,
++ p2m_pages * sizeof(unsigned long));
++ free_bootmem(
++ __pa(xen_start_info->mfn_list),
++ PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
++ sizeof(unsigned long))));
++
++ /*
++		 * Initialise the list of frames that, in turn, list the frames
++		 * making up the p2m table. Used by save/restore.
++ */
++ pfn_to_mfn_frame_list_list = alloc_bootmem_low_pages(PAGE_SIZE);
++
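++		/* fpp: number of frame-list entries that fit in one page
++		 * (PAGE_SIZE / sizeof(unsigned long)). */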
++ fpp = PAGE_SIZE/sizeof(unsigned long);
++ for (i=0, j=0, k=-1; i< max_pfn; i+=fpp, j++) {
++ if ((j % fpp) == 0) {
++ k++;
++ BUG_ON(k>=16);
++ pfn_to_mfn_frame_list[k] =
++ alloc_bootmem_low_pages(PAGE_SIZE);
++ pfn_to_mfn_frame_list_list[k] =
++ virt_to_mfn(pfn_to_mfn_frame_list[k]);
++ j=0;
++ }
++ pfn_to_mfn_frame_list[k][j] =
++ virt_to_mfn(&phys_to_machine_mapping[i]);
++ }
++ HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
++ HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
++ virt_to_mfn(pfn_to_mfn_frame_list_list);
++ }
++
++ /* Mark all ISA DMA channels in-use - using them wouldn't work. */
++ for (i = 0; i < MAX_DMA_CHANNELS; ++i)
++ if (i != 4 && request_dma(i, "xen") != 0)
++ BUG();
++
++ /*
++ * NOTE: at this point the bootmem allocator is fully available.
++ */
++
++ if (is_initial_xendomain())
++ dmi_scan_machine();
++
++#ifdef CONFIG_X86_GENERICARCH
++ generic_apic_probe(*cmdline_p);
++#endif
++ if (efi_enabled)
++ efi_map_memmap();
++
++ set_iopl.iopl = 1;
++ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl));
++
++#ifdef CONFIG_ACPI
++ if (!is_initial_xendomain()) {
++ printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
++ acpi_disabled = 1;
++ acpi_ht = 0;
++ }
++
++ /*
++ * Parse the ACPI tables for possible boot-time SMP configuration.
++ */
++ acpi_boot_table_init();
++#endif
++
++#ifdef CONFIG_X86_IO_APIC
++ check_acpi_pci(); /* Checks more than just ACPI actually */
++#endif
++
++#ifdef CONFIG_ACPI
++ acpi_boot_init();
++
++#if defined(CONFIG_SMP) && defined(CONFIG_X86_PC)
++ if (def_to_bigsmp)
++ printk(KERN_WARNING "More than 8 CPUs detected and "
++ "CONFIG_X86_PC cannot handle it.\nUse "
++ "CONFIG_X86_GENERICARCH or CONFIG_X86_BIGSMP.\n");
++#endif
++#endif
++#ifdef CONFIG_X86_LOCAL_APIC
++ if (smp_found_config)
++ get_smp_config();
++#endif
++
++ register_memory();
++
++ if (is_initial_xendomain()) {
++#ifdef CONFIG_VT
++#if defined(CONFIG_VGA_CONSOLE)
++ if (!efi_enabled ||
++ (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
++ conswitchp = &vga_con;
++#elif defined(CONFIG_DUMMY_CONSOLE)
++ conswitchp = &dummy_con;
++#endif
++#endif
++ } else {
++#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
++ conswitchp = &dummy_con;
++#endif
++ }
++ tsc_init();
++}
++
++static int
++xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
++{
++ HYPERVISOR_shutdown(SHUTDOWN_crash);
++ /* we're never actually going to get here... */
++ return NOTIFY_DONE;
++}
++
++static __init int add_pcspkr(void)
++{
++ struct platform_device *pd;
++ int ret;
++
++ if (!is_initial_xendomain())
++ return 0;
++
++ pd = platform_device_alloc("pcspkr", -1);
++ if (!pd)
++ return -ENOMEM;
++
++ ret = platform_device_add(pd);
++ if (ret)
++ platform_device_put(pd);
++
++ return ret;
++}
++device_initcall(add_pcspkr);
++
++/*
++ * Local Variables:
++ * mode:c
++ * c-file-style:"k&r"
++ * c-basic-offset:8
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/setup.c linux-2.6.18-xen-3.2.0/arch/i386/kernel/setup.c
+--- linux-2.6.18.8/arch/i386/kernel/setup.c 2007-02-23 15:52:30.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/setup.c 2008-02-15 16:21:49.000000000 -0800
+@@ -956,6 +956,28 @@ efi_memory_present_wrapper(unsigned long
+ return 0;
+ }
+
++/*
++ * This function checks if any part of the range <start,end> is mapped
++ * with type.
++ */
++int
++e820_any_mapped(u64 start, u64 end, unsigned type)
++{
++ int i;
++
++ for (i = 0; i < e820.nr_map; i++) {
++ const struct e820entry *ei = &e820.map[i];
++
++ if (type && ei->type != type)
++ continue;
++ if (ei->addr >= end || ei->addr + ei->size <= start)
++ continue;
++ return 1;
++ }
++ return 0;
++}
++EXPORT_SYMBOL_GPL(e820_any_mapped);
++
+ /*
+ * This function checks if the entire range <start,end> is mapped with type.
+ *
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/smp-xen.c linux-2.6.18-xen-3.2.0/arch/i386/kernel/smp-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/smp-xen.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/smp-xen.c 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,605 @@
++/*
++ * Intel SMP support routines.
++ *
++ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
++ * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
++ *
++ * This code is released under the GNU General Public License version 2 or
++ * later.
++ */
++
++#include <linux/init.h>
++
++#include <linux/mm.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/smp_lock.h>
++#include <linux/kernel_stat.h>
++#include <linux/mc146818rtc.h>
++#include <linux/cache.h>
++#include <linux/interrupt.h>
++#include <linux/cpu.h>
++#include <linux/module.h>
++
++#include <asm/mtrr.h>
++#include <asm/tlbflush.h>
++#if 0
++#include <mach_apic.h>
++#endif
++#include <xen/evtchn.h>
++
++/*
++ * Some notes on x86 processor bugs affecting SMP operation:
++ *
++ * Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
++ * The Linux implications for SMP are handled as follows:
++ *
++ * Pentium III / [Xeon]
++ * None of the E1AP-E3AP errata are visible to the user.
++ *
++ * E1AP. see PII A1AP
++ * E2AP. see PII A2AP
++ * E3AP. see PII A3AP
++ *
++ * Pentium II / [Xeon]
++ * None of the A1AP-A3AP errata are visible to the user.
++ *
++ * A1AP. see PPro 1AP
++ * A2AP. see PPro 2AP
++ * A3AP. see PPro 7AP
++ *
++ * Pentium Pro
++ * None of 1AP-9AP errata are visible to the normal user,
++ * except occasional delivery of 'spurious interrupt' as trap #15.
++ * This is very rare and a non-problem.
++ *
++ * 1AP. Linux maps APIC as non-cacheable
++ * 2AP. worked around in hardware
++ * 3AP. fixed in C0 and above steppings microcode update.
++ * Linux does not use excessive STARTUP_IPIs.
++ * 4AP. worked around in hardware
++ * 5AP. symmetric IO mode (normal Linux operation) not affected.
++ * 'noapic' mode has vector 0xf filled out properly.
++ * 6AP. 'noapic' mode might be affected - fixed in later steppings
++ *	7AP.	We do not assume writes to the LVT deasserting IRQs
++ * 8AP. We do not enable low power mode (deep sleep) during MP bootup
++ * 9AP. We do not use mixed mode
++ *
++ * Pentium
++ * There is a marginal case where REP MOVS on 100MHz SMP
++ * machines with B stepping processors can fail. XXX should provide
++ * an L1cache=Writethrough or L1cache=off option.
++ *
++ *		B stepping CPUs may hang. There are hardware workarounds
++ *		for this. We warn about it in case your board doesn't have the
++ *		workarounds. Basically that's so I can tell anyone with a B stepping
++ * CPU and SMP problems "tough".
++ *
++ * Specific items [From Pentium Processor Specification Update]
++ *
++ * 1AP. Linux doesn't use remote read
++ * 2AP. Linux doesn't trust APIC errors
++ * 3AP. We work around this
++ * 4AP. Linux never generated 3 interrupts of the same priority
++ * to cause a lost local interrupt.
++ * 5AP. Remote read is never used
++ * 6AP. not affected - worked around in hardware
++ * 7AP. not affected - worked around in hardware
++ * 8AP. worked around in hardware - we get explicit CS errors if not
++ * 9AP. only 'noapic' mode affected. Might generate spurious
++ * interrupts, we log only the first one and count the
++ * rest silently.
++ * 10AP. not affected - worked around in hardware
++ * 11AP. Linux reads the APIC between writes to avoid this, as per
++ * the documentation. Make sure you preserve this as it affects
++ * the C stepping chips too.
++ * 12AP. not affected - worked around in hardware
++ * 13AP. not affected - worked around in hardware
++ * 14AP. we always deassert INIT during bootup
++ * 15AP. not affected - worked around in hardware
++ * 16AP. not affected - worked around in hardware
++ * 17AP. not affected - worked around in hardware
++ * 18AP. not affected - worked around in hardware
++ * 19AP. not affected - worked around in BIOS
++ *
++ *	If this sounds worrying, believe me: these bugs are either ___RARE___
++ *	or are signal timing bugs worked around in hardware, and there's
++ *	almost nothing of note from the C stepping upwards.
++ */
++
++DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };
++
++/*
++ * the following functions deal with sending IPIs between CPUs.
++ *
++ * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
++ */
++
++static inline int __prepare_ICR (unsigned int shortcut, int vector)
++{
++ unsigned int icr = shortcut | APIC_DEST_LOGICAL;
++
++ switch (vector) {
++ default:
++ icr |= APIC_DM_FIXED | vector;
++ break;
++ case NMI_VECTOR:
++ icr |= APIC_DM_NMI;
++ break;
++ }
++ return icr;
++}
++
++static inline int __prepare_ICR2 (unsigned int mask)
++{
++ return SET_APIC_DEST_FIELD(mask);
++}
++
++DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
++
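++/*
++ * Under Xen there is no local APIC to program: each (cpu, vector) IPI is
++ * bound to an event channel whose irq is recorded in ipi_to_irq, and an
++ * IPI is sent by raising that event channel via notify_remote_via_irq().
++ */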
++static inline void __send_IPI_one(unsigned int cpu, int vector)
++{
++ int irq = per_cpu(ipi_to_irq, cpu)[vector];
++ BUG_ON(irq < 0);
++ notify_remote_via_irq(irq);
++}
++
++void __send_IPI_shortcut(unsigned int shortcut, int vector)
++{
++ int cpu;
++
++ switch (shortcut) {
++ case APIC_DEST_SELF:
++ __send_IPI_one(smp_processor_id(), vector);
++ break;
++ case APIC_DEST_ALLBUT:
++ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++ if (cpu == smp_processor_id())
++ continue;
++ if (cpu_isset(cpu, cpu_online_map)) {
++ __send_IPI_one(cpu, vector);
++ }
++ }
++ break;
++ default:
++ printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
++ vector);
++ break;
++ }
++}
++
++void fastcall send_IPI_self(int vector)
++{
++ __send_IPI_shortcut(APIC_DEST_SELF, vector);
++}
++
++/*
++ * This is only used on smaller machines.
++ */
++void send_IPI_mask_bitmask(cpumask_t mask, int vector)
++{
++ unsigned long flags;
++ unsigned int cpu;
++
++ local_irq_save(flags);
++ WARN_ON(cpus_addr(mask)[0] & ~cpus_addr(cpu_online_map)[0]);
++
++ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++ if (cpu_isset(cpu, mask)) {
++ __send_IPI_one(cpu, vector);
++ }
++ }
++
++ local_irq_restore(flags);
++}
++
++void send_IPI_mask_sequence(cpumask_t mask, int vector)
++{
++
++ send_IPI_mask_bitmask(mask, vector);
++}
++
++#include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */
++
++#if 0 /* XEN */
++/*
++ * Smarter SMP flushing macros.
++ * c/o Linus Torvalds.
++ *
++ * These mean you can really definitely utterly forget about
++ *	writing to user space from interrupts. (It's not allowed anyway).
++ *
++ * Optimizations Manfred Spraul <manfred@colorfullife.com>
++ */
++
++static cpumask_t flush_cpumask;
++static struct mm_struct * flush_mm;
++static unsigned long flush_va;
++static DEFINE_SPINLOCK(tlbstate_lock);
++#define FLUSH_ALL 0xffffffff
++
++/*
++ * We cannot call mmdrop() because we are in interrupt context,
++ * instead update mm->cpu_vm_mask.
++ *
++ * We need to reload %cr3 since the page tables may be going
++ * away from under us..
++ */
++static inline void leave_mm (unsigned long cpu)
++{
++ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
++ BUG();
++ cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
++ load_cr3(swapper_pg_dir);
++}
++
++/*
++ *
++ * The flush IPI assumes that a thread switch happens in this order:
++ * [cpu0: the cpu that switches]
++ * 1) switch_mm() either 1a) or 1b)
++ * 1a) thread switch to a different mm
++ * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
++ * Stop ipi delivery for the old mm. This is not synchronized with
++ * 	the other cpus, but smp_invalidate_interrupt ignores flush ipis
++ * 	for the wrong mm, and in the worst case we perform a superfluous
++ * tlb flush.
++ * 1a2) set cpu_tlbstate to TLBSTATE_OK
++ * Now the smp_invalidate_interrupt won't call leave_mm if cpu0
++ * was in lazy tlb mode.
++ * 1a3) update cpu_tlbstate[].active_mm
++ * Now cpu0 accepts tlb flushes for the new mm.
++ * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
++ * Now the other cpus will send tlb flush ipis.
++ * 1a5) change cr3.
++ * 1b) thread switch without mm change
++ * cpu_tlbstate[].active_mm is correct, cpu0 already handles
++ * flush ipis.
++ * 1b1) set cpu_tlbstate to TLBSTATE_OK
++ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
++ * Atomically set the bit [other cpus will start sending flush ipis],
++ * and test the bit.
++ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
++ * 2) switch %%esp, ie current
++ *
++ * The interrupt must handle 2 special cases:
++ * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
++ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
++ * runs in kernel space, the cpu could load tlb entries for user space
++ * pages.
++ *
++ * The good news is that cpu_tlbstate is local to each cpu, no
++ * write/read ordering problems.
++ */
++
++/*
++ * TLB flush IPI:
++ *
++ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
++ * 2) Leave the mm if we are in the lazy tlb mode.
++ */
++
++irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id,
++ struct pt_regs *regs)
++{
++ unsigned long cpu;
++
++ cpu = get_cpu();
++
++ if (!cpu_isset(cpu, flush_cpumask))
++ goto out;
++ /*
++	 * This was a BUG() but, until someone can quote me the
++	 * line from the Intel manual that guarantees an IPI to
++	 * multiple CPUs is retried _only_ on the erroring CPUs,
++	 * it's staying as a return
++ *
++ * BUG();
++ */
++
++ if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
++ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
++ if (flush_va == FLUSH_ALL)
++ local_flush_tlb();
++ else
++ __flush_tlb_one(flush_va);
++ } else
++ leave_mm(cpu);
++ }
++ smp_mb__before_clear_bit();
++ cpu_clear(cpu, flush_cpumask);
++ smp_mb__after_clear_bit();
++out:
++ put_cpu_no_resched();
++
++ return IRQ_HANDLED;
++}
++
++static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
++ unsigned long va)
++{
++ /*
++ * A couple of (to be removed) sanity checks:
++ *
++ * - current CPU must not be in mask
++ * - mask must exist :)
++ */
++ BUG_ON(cpus_empty(cpumask));
++ BUG_ON(cpu_isset(smp_processor_id(), cpumask));
++ BUG_ON(!mm);
++
++ /* If a CPU which we ran on has gone down, OK. */
++ cpus_and(cpumask, cpumask, cpu_online_map);
++ if (cpus_empty(cpumask))
++ return;
++
++ /*
++	 * I'm not happy about this global shared spinlock in the
++ * MM hot path, but we'll see how contended it is.
++ * Temporarily this turns IRQs off, so that lockups are
++ * detected by the NMI watchdog.
++ */
++ spin_lock(&tlbstate_lock);
++
++ flush_mm = mm;
++ flush_va = va;
++#if NR_CPUS <= BITS_PER_LONG
++ atomic_set_mask(cpumask, &flush_cpumask);
++#else
++ {
++ int k;
++ unsigned long *flush_mask = (unsigned long *)&flush_cpumask;
++ unsigned long *cpu_mask = (unsigned long *)&cpumask;
++ for (k = 0; k < BITS_TO_LONGS(NR_CPUS); ++k)
++ atomic_set_mask(cpu_mask[k], &flush_mask[k]);
++ }
++#endif
++ /*
++ * We have to send the IPI only to
++ * CPUs affected.
++ */
++ send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
++
++ while (!cpus_empty(flush_cpumask))
++ /* nothing. lockup detection does not belong here */
++ mb();
++
++ flush_mm = NULL;
++ flush_va = 0;
++ spin_unlock(&tlbstate_lock);
++}
++
++void flush_tlb_current_task(void)
++{
++ struct mm_struct *mm = current->mm;
++ cpumask_t cpu_mask;
++
++ preempt_disable();
++ cpu_mask = mm->cpu_vm_mask;
++ cpu_clear(smp_processor_id(), cpu_mask);
++
++ local_flush_tlb();
++ if (!cpus_empty(cpu_mask))
++ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++ preempt_enable();
++}
++
++void flush_tlb_mm (struct mm_struct * mm)
++{
++ cpumask_t cpu_mask;
++
++ preempt_disable();
++ cpu_mask = mm->cpu_vm_mask;
++ cpu_clear(smp_processor_id(), cpu_mask);
++
++ if (current->active_mm == mm) {
++ if (current->mm)
++ local_flush_tlb();
++ else
++ leave_mm(smp_processor_id());
++ }
++ if (!cpus_empty(cpu_mask))
++ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++
++ preempt_enable();
++}
++
++void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ cpumask_t cpu_mask;
++
++ preempt_disable();
++ cpu_mask = mm->cpu_vm_mask;
++ cpu_clear(smp_processor_id(), cpu_mask);
++
++ if (current->active_mm == mm) {
++		if (current->mm)
++ __flush_tlb_one(va);
++ else
++ leave_mm(smp_processor_id());
++ }
++
++ if (!cpus_empty(cpu_mask))
++ flush_tlb_others(cpu_mask, mm, va);
++
++ preempt_enable();
++}
++EXPORT_SYMBOL(flush_tlb_page);
++
++static void do_flush_tlb_all(void* info)
++{
++ unsigned long cpu = smp_processor_id();
++
++ __flush_tlb_all();
++ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
++ leave_mm(cpu);
++}
++
++void flush_tlb_all(void)
++{
++ on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
++}
++
++#endif /* XEN */
++
++/*
++ * this function sends a 'reschedule' IPI to another CPU.
++ * it goes straight through and wastes no time serializing
++ * anything. Worst case is that we lose a reschedule ...
++ */
++void smp_send_reschedule(int cpu)
++{
++ WARN_ON(cpu_is_offline(cpu));
++ send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
++}
++
++/*
++ * Structure and data for smp_call_function(). This is designed to minimise
++ * static memory requirements. It also looks cleaner.
++ */
++static DEFINE_SPINLOCK(call_lock);
++
++struct call_data_struct {
++ void (*func) (void *info);
++ void *info;
++ atomic_t started;
++ atomic_t finished;
++ int wait;
++};
++
++void lock_ipi_call_lock(void)
++{
++ spin_lock_irq(&call_lock);
++}
++
++void unlock_ipi_call_lock(void)
++{
++ spin_unlock_irq(&call_lock);
++}
++
++static struct call_data_struct *call_data;
++
++/**
++ * smp_call_function(): Run a function on all other CPUs.
++ * @func: The function to run. This must be fast and non-blocking.
++ * @info: An arbitrary pointer to pass to the function.
++ * @nonatomic: currently unused.
++ * @wait: If true, wait (atomically) until function has completed on other CPUs.
++ *
++ * Returns 0 on success, else a negative status code. Does not return until
++ * remote CPUs are ready to execute <<func>> or have already executed it.
++ *
++ * You must not call this function with disabled interrupts or from a
++ * hardware interrupt handler or from a bottom half handler.
++ */
++int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
++ int wait)
++{
++ struct call_data_struct data;
++ int cpus;
++
++ /* Holding any lock stops cpus from going down. */
++ spin_lock(&call_lock);
++ cpus = num_online_cpus() - 1;
++ if (!cpus) {
++ spin_unlock(&call_lock);
++ return 0;
++ }
++
++ /* Can deadlock when called with interrupts disabled */
++ WARN_ON(irqs_disabled());
++
++ data.func = func;
++ data.info = info;
++ atomic_set(&data.started, 0);
++ data.wait = wait;
++ if (wait)
++ atomic_set(&data.finished, 0);
++
++ call_data = &data;
++ mb();
++
++ /* Send a message to all other CPUs and wait for them to respond */
++ send_IPI_allbutself(CALL_FUNCTION_VECTOR);
++
++ /* Wait for response */
++ while (atomic_read(&data.started) != cpus)
++ cpu_relax();
++
++ if (wait)
++ while (atomic_read(&data.finished) != cpus)
++ cpu_relax();
++ spin_unlock(&call_lock);
++
++ return 0;
++}
++EXPORT_SYMBOL(smp_call_function);
++
++static void stop_this_cpu (void * dummy)
++{
++ /*
++ * Remove this CPU:
++ */
++ cpu_clear(smp_processor_id(), cpu_online_map);
++ local_irq_disable();
++ disable_all_local_evtchn();
++ if (cpu_data[smp_processor_id()].hlt_works_ok)
++ for(;;) halt();
++ for (;;);
++}
++
++/*
++ * this function calls the 'stop' function on all other CPUs in the system.
++ */
++
++void smp_send_stop(void)
++{
++ smp_call_function(stop_this_cpu, NULL, 1, 0);
++
++ local_irq_disable();
++ disable_all_local_evtchn();
++ local_irq_enable();
++}
++
++/*
++ * Reschedule call back. Nothing to do,
++ * all the work is done automatically when
++ * we return from the interrupt.
++ */
++irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id,
++ struct pt_regs *regs)
++{
++
++ return IRQ_HANDLED;
++}
++
++#include <linux/kallsyms.h>
++irqreturn_t smp_call_function_interrupt(int irq, void *dev_id,
++ struct pt_regs *regs)
++{
++ void (*func) (void *info) = call_data->func;
++ void *info = call_data->info;
++ int wait = call_data->wait;
++
++ /*
++ * Notify initiating CPU that I've grabbed the data and am
++ * about to execute the function
++ */
++ mb();
++ atomic_inc(&call_data->started);
++ /*
++ * At this point the info structure may be out of scope unless wait==1
++ */
++ irq_enter();
++ (*func)(info);
++ irq_exit();
++
++ if (wait) {
++ mb();
++ atomic_inc(&call_data->finished);
++ }
++
++ return IRQ_HANDLED;
++}
++
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/sysenter.c linux-2.6.18-xen-3.2.0/arch/i386/kernel/sysenter.c
+--- linux-2.6.18.8/arch/i386/kernel/sysenter.c 2007-02-23 15:52:30.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/sysenter.c 2008-02-15 16:21:49.000000000 -0800
+@@ -23,6 +23,10 @@
+ #include <asm/pgtable.h>
+ #include <asm/unistd.h>
+
++#ifdef CONFIG_XEN
++#include <xen/interface/callback.h>
++#endif
++
+ /*
+ * Should the kernel map a VDSO page into processes and pass its
+ * address down to glibc upon exec()?
+@@ -44,6 +48,7 @@ extern asmlinkage void sysenter_entry(vo
+
+ void enable_sep_cpu(void)
+ {
++#ifndef CONFIG_XEN
+ int cpu = get_cpu();
+ struct tss_struct *tss = &per_cpu(init_tss, cpu);
+
+@@ -57,7 +62,36 @@ void enable_sep_cpu(void)
+ wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
+ wrmsr(MSR_IA32_SYSENTER_ESP, tss->esp1, 0);
+ wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) sysenter_entry, 0);
+- put_cpu();
++#else
++ extern asmlinkage void sysenter_entry_pv(void);
++ static struct callback_register sysenter = {
++ .type = CALLBACKTYPE_sysenter,
++ .address = { __KERNEL_CS, (unsigned long)sysenter_entry_pv },
++ };
++
++ if (!boot_cpu_has(X86_FEATURE_SEP))
++ return;
++
++ get_cpu();
++
++ if (xen_feature(XENFEAT_supervisor_mode_kernel))
++ sysenter.address.eip = (unsigned long)sysenter_entry;
++
++ switch (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter)) {
++ case 0:
++ break;
++#if CONFIG_XEN_COMPAT < 0x030200
++ case -ENOSYS:
++ sysenter.type = CALLBACKTYPE_sysenter_deprecated;
++ if (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter) == 0)
++ break;
++#endif
++ default:
++ clear_bit(X86_FEATURE_SEP, boot_cpu_data.x86_capability);
++ break;
++ }
++#endif
++ put_cpu();
+ }
+
+ /*
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/time-xen.c linux-2.6.18-xen-3.2.0/arch/i386/kernel/time-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/time-xen.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/time-xen.c 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,1194 @@
++/*
++ * linux/arch/i386/kernel/time.c
++ *
++ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
++ *
++ * This file contains the PC-specific time handling details:
++ * reading the RTC at bootup, etc..
++ * 1994-07-02 Alan Modra
++ * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
++ * 1995-03-26 Markus Kuhn
++ * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
++ * precision CMOS clock update
++ * 1996-05-03 Ingo Molnar
++ * fixed time warps in do_[slow|fast]_gettimeoffset()
++ * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
++ * "A Kernel Model for Precision Timekeeping" by Dave Mills
++ * 1998-09-05 (Various)
++ * More robust do_fast_gettimeoffset() algorithm implemented
++ * (works with APM, Cyrix 6x86MX and Centaur C6),
++ * monotonic gettimeofday() with fast_get_timeoffset(),
++ * drift-proof precision TSC calibration on boot
++ * (C. Scott Ananian <cananian@alumni.princeton.edu>, Andrew D.
++ * Balsa <andrebalsa@altern.org>, Philip Gladstone <philip@raptor.com>;
++ * ported from 2.0.35 Jumbo-9 by Michael Krause <m.krause@tu-harburg.de>).
++ * 1998-12-16 Andrea Arcangeli
++ * Fixed Jumbo-9 code in 2.1.131: do_gettimeofday was missing 1 jiffy
++ * because was not accounting lost_ticks.
++ * 1998-12-24 Copyright (C) 1998 Andrea Arcangeli
++ * Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
++ * serialize accesses to xtime/lost_ticks).
++ */
++
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/param.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/interrupt.h>
++#include <linux/time.h>
++#include <linux/delay.h>
++#include <linux/init.h>
++#include <linux/smp.h>
++#include <linux/module.h>
++#include <linux/sysdev.h>
++#include <linux/bcd.h>
++#include <linux/efi.h>
++#include <linux/mca.h>
++#include <linux/sysctl.h>
++#include <linux/percpu.h>
++#include <linux/kernel_stat.h>
++#include <linux/posix-timers.h>
++#include <linux/cpufreq.h>
++
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/irq.h>
++#include <asm/msr.h>
++#include <asm/delay.h>
++#include <asm/mpspec.h>
++#include <asm/uaccess.h>
++#include <asm/processor.h>
++#include <asm/timer.h>
++#include <asm/sections.h>
++
++#include "mach_time.h"
++
++#include <linux/timex.h>
++
++#include <asm/hpet.h>
++
++#include <asm/arch_hooks.h>
++
++#include <xen/evtchn.h>
++#include <xen/interface/vcpu.h>
++
++#if defined (__i386__)
++#include <asm/i8259.h>
++#endif
++
++int pit_latch_buggy; /* extern */
++
++#if defined(__x86_64__)
++unsigned long vxtime_hz = PIT_TICK_RATE;
++struct vxtime_data __vxtime __section_vxtime; /* for vsyscalls */
++volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
++unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
++struct timespec __xtime __section_xtime;
++struct timezone __sys_tz __section_sys_tz;
++#endif
++
++unsigned int cpu_khz; /* Detected as we calibrate the TSC */
++EXPORT_SYMBOL(cpu_khz);
++
++extern unsigned long wall_jiffies;
++
++DEFINE_SPINLOCK(rtc_lock);
++EXPORT_SYMBOL(rtc_lock);
++
++extern struct init_timer_opts timer_tsc_init;
++extern struct timer_opts timer_tsc;
++#define timer_none timer_tsc
++
++/* These are periodically updated in shared_info, and then copied here. */
++struct shadow_time_info {
++ u64 tsc_timestamp; /* TSC at last update of time vals. */
++ u64 system_timestamp; /* Time, in nanosecs, since boot. */
++ u32 tsc_to_nsec_mul;
++ u32 tsc_to_usec_mul;
++ int tsc_shift;
++ u32 version;
++};
++static DEFINE_PER_CPU(struct shadow_time_info, shadow_time);
++static struct timespec shadow_tv;
++static u32 shadow_tv_version;
++
++static struct timeval monotonic_tv;
++static spinlock_t monotonic_lock = SPIN_LOCK_UNLOCKED;
++
++/* Keep track of last time we did processing/updating of jiffies and xtime. */
++static u64 processed_system_time; /* System time (ns) at last processing. */
++static DEFINE_PER_CPU(u64, processed_system_time);
++
++/* How much CPU time was spent blocked and how much was 'stolen'? */
++static DEFINE_PER_CPU(u64, processed_stolen_time);
++static DEFINE_PER_CPU(u64, processed_blocked_time);
++
++/* Current runstate of each CPU (updated automatically by the hypervisor). */
++static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
++
++/* Must be signed, as it's compared with s64 quantities which can be -ve. */
++#define NS_PER_TICK (1000000000LL/HZ)
++
++static void __clock_was_set(void *unused)
++{
++ clock_was_set();
++}
++static DECLARE_WORK(clock_was_set_work, __clock_was_set, NULL);
++
++static inline void __normalize_time(time_t *sec, s64 *nsec)
++{
++ while (*nsec >= NSEC_PER_SEC) {
++ (*nsec) -= NSEC_PER_SEC;
++ (*sec)++;
++ }
++ while (*nsec < 0) {
++ (*nsec) += NSEC_PER_SEC;
++ (*sec)--;
++ }
++}
++
++/* Does this guest OS track Xen time, or set its wall clock independently? */
++static int independent_wallclock = 0;
++static int __init __independent_wallclock(char *str)
++{
++ independent_wallclock = 1;
++ return 1;
++}
++__setup("independent_wallclock", __independent_wallclock);
++
++/* Permitted clock jitter, in nsecs, beyond which a warning will be printed. */
++static unsigned long permitted_clock_jitter = 10000000UL; /* 10ms */
++static int __init __permitted_clock_jitter(char *str)
++{
++ permitted_clock_jitter = simple_strtoul(str, NULL, 0);
++ return 1;
++}
++__setup("permitted_clock_jitter=", __permitted_clock_jitter);
++
++#if 0
++static void delay_tsc(unsigned long loops)
++{
++ unsigned long bclock, now;
++
++ rdtscl(bclock);
++ do {
++ rep_nop();
++ rdtscl(now);
++ } while ((now - bclock) < loops);
++}
++
++struct timer_opts timer_tsc = {
++ .name = "tsc",
++ .delay = delay_tsc,
++};
++#endif
++
++/*
++ * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
++ * yielding a 64-bit result.
++ */
++static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
++{
++ u64 product;
++#ifdef __i386__
++ u32 tmp1, tmp2;
++#endif
++
++ if (shift < 0)
++ delta >>= -shift;
++ else
++ delta <<= shift;
++
++#ifdef __i386__
++ __asm__ (
++ "mul %5 ; "
++ "mov %4,%%eax ; "
++ "mov %%edx,%4 ; "
++ "mul %5 ; "
++ "xor %5,%5 ; "
++ "add %4,%%eax ; "
++ "adc %5,%%edx ; "
++ : "=A" (product), "=r" (tmp1), "=r" (tmp2)
++ : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
++#else
++ __asm__ (
++ "mul %%rdx ; shrd $32,%%rdx,%%rax"
++ : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
++#endif
++
++ return product;
++}
++
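++/*
++ * For reference: both versions of the inline assembly above compute
++ * ((delta << shift) * mul_frac) >> 32 without a full 64x64 multiply.
++ * An untested, portable sketch of the same calculation (deliberately
++ * kept out of the build) would be:
++ */
++#if 0
++static inline u64 scale_delta_portable(u64 delta, u32 mul_frac, int shift)
++{
++	u64 lo, hi;
++
++	if (shift < 0)
++		delta >>= -shift;
++	else
++		delta <<= shift;
++
++	lo = (u64)(u32)delta * mul_frac;	/* low half of delta */
++	hi = (delta >> 32) * mul_frac;		/* high half of delta */
++	return hi + (lo >> 32);			/* == (delta * mul_frac) >> 32 */
++}
++#endif
++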
++#if 0 /* defined (__i386__) */
++int read_current_timer(unsigned long *timer_val)
++{
++ rdtscl(*timer_val);
++ return 0;
++}
++#endif
++
++void init_cpu_khz(void)
++{
++ u64 __cpu_khz = 1000000ULL << 32;
++ struct vcpu_time_info *info = &vcpu_info(0)->time;
++ do_div(__cpu_khz, info->tsc_to_system_mul);
++ if (info->tsc_shift < 0)
++ cpu_khz = __cpu_khz << -info->tsc_shift;
++ else
++ cpu_khz = __cpu_khz >> info->tsc_shift;
++}
++
++static u64 get_nsec_offset(struct shadow_time_info *shadow)
++{
++ u64 now, delta;
++ rdtscll(now);
++ delta = now - shadow->tsc_timestamp;
++ return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
++}
++
++static unsigned long get_usec_offset(struct shadow_time_info *shadow)
++{
++ u64 now, delta;
++ rdtscll(now);
++ delta = now - shadow->tsc_timestamp;
++ return scale_delta(delta, shadow->tsc_to_usec_mul, shadow->tsc_shift);
++}
++
++static void __update_wallclock(time_t sec, long nsec)
++{
++ long wtm_nsec, xtime_nsec;
++ time_t wtm_sec, xtime_sec;
++ u64 tmp, wc_nsec;
++
++ /* Adjust wall-clock time base based on wall_jiffies ticks. */
++ wc_nsec = processed_system_time;
++ wc_nsec += sec * (u64)NSEC_PER_SEC;
++ wc_nsec += nsec;
++ wc_nsec -= (jiffies - wall_jiffies) * (u64)NS_PER_TICK;
++
++ /* Split wallclock base into seconds and nanoseconds. */
++ tmp = wc_nsec;
++ xtime_nsec = do_div(tmp, 1000000000);
++ xtime_sec = (time_t)tmp;
++
++ wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - xtime_sec);
++ wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - xtime_nsec);
++
++ set_normalized_timespec(&xtime, xtime_sec, xtime_nsec);
++ set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
++
++ ntp_clear();
++}
++
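++/*
++ * Re-read Xen's wallclock base.  Xen bumps wc_version around every update
++ * and keeps the low bit set while an update is in progress, so retry until
++ * a stable, even version is observed; the new base is then folded into
++ * xtime unless this guest keeps an independent wallclock.
++ */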
++static void update_wallclock(void)
++{
++ shared_info_t *s = HYPERVISOR_shared_info;
++
++ do {
++ shadow_tv_version = s->wc_version;
++ rmb();
++ shadow_tv.tv_sec = s->wc_sec;
++ shadow_tv.tv_nsec = s->wc_nsec;
++ rmb();
++ } while ((s->wc_version & 1) | (shadow_tv_version ^ s->wc_version));
++
++ if (!independent_wallclock)
++ __update_wallclock(shadow_tv.tv_sec, shadow_tv.tv_nsec);
++}
++
++/*
++ * Reads a consistent set of time-base values from Xen, into a shadow data
++ * area.
++ */
++static void get_time_values_from_xen(unsigned int cpu)
++{
++ struct vcpu_time_info *src;
++ struct shadow_time_info *dst;
++
++ src = &vcpu_info(cpu)->time;
++ dst = &per_cpu(shadow_time, cpu);
++
++ do {
++ dst->version = src->version;
++ rmb();
++ dst->tsc_timestamp = src->tsc_timestamp;
++ dst->system_timestamp = src->system_time;
++ dst->tsc_to_nsec_mul = src->tsc_to_system_mul;
++ dst->tsc_shift = src->tsc_shift;
++ rmb();
++ } while ((src->version & 1) | (dst->version ^ src->version));
++
++ dst->tsc_to_usec_mul = dst->tsc_to_nsec_mul / 1000;
++}
++
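++/* Nonzero iff the given CPU's shadow copy still matches Xen's version counter. */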
++static inline int time_values_up_to_date(unsigned int cpu)
++{
++ struct vcpu_time_info *src;
++ struct shadow_time_info *dst;
++
++ src = &vcpu_info(cpu)->time;
++ dst = &per_cpu(shadow_time, cpu);
++
++ rmb();
++ return (dst->version == src->version);
++}
++
++/*
++ * This is a special lock that is owned by the CPU and holds the index
++ * register we are working with. It is required for NMI access to the
++ * CMOS/RTC registers. See include/asm-i386/mc146818rtc.h for details.
++ */
++volatile unsigned long cmos_lock = 0;
++EXPORT_SYMBOL(cmos_lock);
++
++/* Routines for accessing the CMOS RAM/RTC. */
++unsigned char rtc_cmos_read(unsigned char addr)
++{
++ unsigned char val;
++ lock_cmos_prefix(addr);
++ outb_p(addr, RTC_PORT(0));
++ val = inb_p(RTC_PORT(1));
++ lock_cmos_suffix(addr);
++ return val;
++}
++EXPORT_SYMBOL(rtc_cmos_read);
++
++void rtc_cmos_write(unsigned char val, unsigned char addr)
++{
++ lock_cmos_prefix(addr);
++ outb_p(addr, RTC_PORT(0));
++ outb_p(val, RTC_PORT(1));
++ lock_cmos_suffix(addr);
++}
++EXPORT_SYMBOL(rtc_cmos_write);
++
++/*
++ * This version of gettimeofday has microsecond resolution
++ * and better than microsecond precision on fast x86 machines with TSC.
++ */
++void do_gettimeofday(struct timeval *tv)
++{
++ unsigned long seq;
++ unsigned long usec, sec;
++ unsigned long flags;
++ s64 nsec;
++ unsigned int cpu;
++ struct shadow_time_info *shadow;
++ u32 local_time_version;
++
++ cpu = get_cpu();
++ shadow = &per_cpu(shadow_time, cpu);
++
++ do {
++ unsigned long lost;
++
++ local_time_version = shadow->version;
++ seq = read_seqbegin(&xtime_lock);
++
++ usec = get_usec_offset(shadow);
++ lost = jiffies - wall_jiffies;
++
++ if (unlikely(lost))
++ usec += lost * (USEC_PER_SEC / HZ);
++
++ sec = xtime.tv_sec;
++ usec += (xtime.tv_nsec / NSEC_PER_USEC);
++
++ nsec = shadow->system_timestamp - processed_system_time;
++ __normalize_time(&sec, &nsec);
++ usec += (long)nsec / NSEC_PER_USEC;
++
++ if (unlikely(!time_values_up_to_date(cpu))) {
++ /*
++ * We may have blocked for a long time,
++ * rendering our calculations invalid
++ * (e.g. the time delta may have
++ * overflowed). Detect that and recalculate
++ * with fresh values.
++ */
++ get_time_values_from_xen(cpu);
++ continue;
++ }
++ } while (read_seqretry(&xtime_lock, seq) ||
++ (local_time_version != shadow->version));
++
++ put_cpu();
++
++ while (usec >= USEC_PER_SEC) {
++ usec -= USEC_PER_SEC;
++ sec++;
++ }
++
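++	/*
++	 * Clamp against the last value we handed out so that gettimeofday()
++	 * never appears to step backwards, even when called on another CPU.
++	 */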
++ spin_lock_irqsave(&monotonic_lock, flags);
++ if ((sec > monotonic_tv.tv_sec) ||
++ ((sec == monotonic_tv.tv_sec) && (usec > monotonic_tv.tv_usec)))
++ {
++ monotonic_tv.tv_sec = sec;
++ monotonic_tv.tv_usec = usec;
++ } else {
++ sec = monotonic_tv.tv_sec;
++ usec = monotonic_tv.tv_usec;
++ }
++ spin_unlock_irqrestore(&monotonic_lock, flags);
++
++ tv->tv_sec = sec;
++ tv->tv_usec = usec;
++}
++
++EXPORT_SYMBOL(do_gettimeofday);
++
++int do_settimeofday(struct timespec *tv)
++{
++ time_t sec;
++ s64 nsec;
++ unsigned int cpu;
++ struct shadow_time_info *shadow;
++ struct xen_platform_op op;
++
++ if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
++ return -EINVAL;
++
++ cpu = get_cpu();
++ shadow = &per_cpu(shadow_time, cpu);
++
++ write_seqlock_irq(&xtime_lock);
++
++ /*
++	 * Ensure we haven't been blocked for so long that our time delta
++	 * overflows.  If that happens then our shadow time values are stale,
++	 * so retry with fresh ones.
++ */
++ for (;;) {
++ nsec = tv->tv_nsec - get_nsec_offset(shadow);
++ if (time_values_up_to_date(cpu))
++ break;
++ get_time_values_from_xen(cpu);
++ }
++ sec = tv->tv_sec;
++ __normalize_time(&sec, &nsec);
++
++ if (is_initial_xendomain() && !independent_wallclock) {
++ op.cmd = XENPF_settime;
++ op.u.settime.secs = sec;
++ op.u.settime.nsecs = nsec;
++ op.u.settime.system_time = shadow->system_timestamp;
++ WARN_ON(HYPERVISOR_platform_op(&op));
++ update_wallclock();
++ } else if (independent_wallclock) {
++ nsec -= shadow->system_timestamp;
++ __normalize_time(&sec, &nsec);
++ __update_wallclock(sec, nsec);
++ }
++
++ /* Reset monotonic gettimeofday() timeval. */
++ spin_lock(&monotonic_lock);
++ monotonic_tv.tv_sec = 0;
++ monotonic_tv.tv_usec = 0;
++ spin_unlock(&monotonic_lock);
++
++ write_sequnlock_irq(&xtime_lock);
++
++ put_cpu();
++
++ clock_was_set();
++ return 0;
++}
++
++EXPORT_SYMBOL(do_settimeofday);
++
++static void sync_xen_wallclock(unsigned long dummy);
++static DEFINE_TIMER(sync_xen_wallclock_timer, sync_xen_wallclock, 0, 0);
++static void sync_xen_wallclock(unsigned long dummy)
++{
++ time_t sec;
++ s64 nsec;
++ struct xen_platform_op op;
++
++ if (!ntp_synced() || independent_wallclock || !is_initial_xendomain())
++ return;
++
++ write_seqlock_irq(&xtime_lock);
++
++ sec = xtime.tv_sec;
++ nsec = xtime.tv_nsec + ((jiffies - wall_jiffies) * (u64)NS_PER_TICK);
++ __normalize_time(&sec, &nsec);
++
++ op.cmd = XENPF_settime;
++ op.u.settime.secs = sec;
++ op.u.settime.nsecs = nsec;
++ op.u.settime.system_time = processed_system_time;
++ WARN_ON(HYPERVISOR_platform_op(&op));
++
++ update_wallclock();
++
++ write_sequnlock_irq(&xtime_lock);
++
++ /* Once per minute. */
++ mod_timer(&sync_xen_wallclock_timer, jiffies + 60*HZ);
++}
++
++static int set_rtc_mmss(unsigned long nowtime)
++{
++ int retval;
++ unsigned long flags;
++
++ if (independent_wallclock || !is_initial_xendomain())
++ return 0;
++
++ /* gets recalled with irq locally disabled */
++ /* XXX - does irqsave resolve this? -johnstul */
++ spin_lock_irqsave(&rtc_lock, flags);
++ if (efi_enabled)
++ retval = efi_set_rtc_mmss(nowtime);
++ else
++ retval = mach_set_rtc_mmss(nowtime);
++ spin_unlock_irqrestore(&rtc_lock, flags);
++
++ return retval;
++}
++
++/* monotonic_clock(): returns # of nanoseconds passed since time_init()
++ * Note: This function is required to return accurate
++ * time even in the absence of multiple timer ticks.
++ */
++unsigned long long monotonic_clock(void)
++{
++ unsigned int cpu = get_cpu();
++ struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
++ u64 time;
++ u32 local_time_version;
++
++ do {
++ local_time_version = shadow->version;
++ barrier();
++ time = shadow->system_timestamp + get_nsec_offset(shadow);
++ if (!time_values_up_to_date(cpu))
++ get_time_values_from_xen(cpu);
++ barrier();
++ } while (local_time_version != shadow->version);
++
++ put_cpu();
++
++ return time;
++}
++EXPORT_SYMBOL(monotonic_clock);
++
++#ifdef __x86_64__
++unsigned long long sched_clock(void)
++{
++ return monotonic_clock();
++}
++#endif
++
++#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
++unsigned long profile_pc(struct pt_regs *regs)
++{
++ unsigned long pc = instruction_pointer(regs);
++
++#ifdef __x86_64__
++ /* Assume the lock function has either no stack frame or only a single word.
++ This checks if the address on the stack looks like a kernel text address.
++ There is a small window for false hits, but in that case the tick
++ is just accounted to the spinlock function.
++ Better would be to write these functions in assembler again
++ and check exactly. */
++ if (!user_mode_vm(regs) && in_lock_functions(pc)) {
++ char *v = *(char **)regs->rsp;
++ if ((v >= _stext && v <= _etext) ||
++ (v >= _sinittext && v <= _einittext) ||
++ (v >= (char *)MODULES_VADDR && v <= (char *)MODULES_END))
++ return (unsigned long)v;
++ return ((unsigned long *)regs->rsp)[1];
++ }
++#else
++ if (!user_mode_vm(regs) && in_lock_functions(pc))
++ return *(unsigned long *)(regs->ebp + 4);
++#endif
++
++ return pc;
++}
++EXPORT_SYMBOL(profile_pc);
++#endif
++
++/*
++ * This is the same as the above, except we _also_ save the current
++ * Time Stamp Counter value at the time of the timer interrupt, so that
++ * we can later estimate the time of day more exactly.
++ */
++irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
++{
++ s64 delta, delta_cpu, stolen, blocked;
++ u64 sched_time;
++ unsigned int i, cpu = smp_processor_id();
++ struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
++ struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
++
++ /*
++ * Here we are in the timer irq handler. We just have irqs locally
++ * disabled but we don't know if the timer_bh is running on the other
++	 * CPU. We need to avoid an SMP race with it. NOTE: we don't need
++ * the irq version of write_lock because as just said we have irq
++ * locally disabled. -arca
++ */
++ write_seqlock(&xtime_lock);
++
++ do {
++ get_time_values_from_xen(cpu);
++
++ /* Obtain a consistent snapshot of elapsed wallclock cycles. */
++ delta = delta_cpu =
++ shadow->system_timestamp + get_nsec_offset(shadow);
++ delta -= processed_system_time;
++ delta_cpu -= per_cpu(processed_system_time, cpu);
++
++ /*
++ * Obtain a consistent snapshot of stolen/blocked cycles. We
++ * can use state_entry_time to detect if we get preempted here.
++ */
++ do {
++ sched_time = runstate->state_entry_time;
++ barrier();
++ stolen = runstate->time[RUNSTATE_runnable] +
++ runstate->time[RUNSTATE_offline] -
++ per_cpu(processed_stolen_time, cpu);
++ blocked = runstate->time[RUNSTATE_blocked] -
++ per_cpu(processed_blocked_time, cpu);
++ barrier();
++ } while (sched_time != runstate->state_entry_time);
++ } while (!time_values_up_to_date(cpu));
++
++ if ((unlikely(delta < -(s64)permitted_clock_jitter) ||
++ unlikely(delta_cpu < -(s64)permitted_clock_jitter))
++ && printk_ratelimit()) {
++ printk("Timer ISR/%u: Time went backwards: "
++ "delta=%lld delta_cpu=%lld shadow=%lld "
++ "off=%lld processed=%lld cpu_processed=%lld\n",
++ cpu, delta, delta_cpu, shadow->system_timestamp,
++ (s64)get_nsec_offset(shadow),
++ processed_system_time,
++ per_cpu(processed_system_time, cpu));
++ for (i = 0; i < num_online_cpus(); i++)
++ printk(" %d: %lld\n", i,
++ per_cpu(processed_system_time, i));
++ }
++
++ /* System-wide jiffy work. */
++ while (delta >= NS_PER_TICK) {
++ delta -= NS_PER_TICK;
++ processed_system_time += NS_PER_TICK;
++ do_timer(regs);
++ }
++
++ if (shadow_tv_version != HYPERVISOR_shared_info->wc_version) {
++ update_wallclock();
++ if (keventd_up())
++ schedule_work(&clock_was_set_work);
++ }
++
++ write_sequnlock(&xtime_lock);
++
++ /*
++ * Account stolen ticks.
++ * HACK: Passing NULL to account_steal_time()
++ * ensures that the ticks are accounted as stolen.
++ */
++ if ((stolen > 0) && (delta_cpu > 0)) {
++ delta_cpu -= stolen;
++ if (unlikely(delta_cpu < 0))
++ stolen += delta_cpu; /* clamp local-time progress */
++ do_div(stolen, NS_PER_TICK);
++ per_cpu(processed_stolen_time, cpu) += stolen * NS_PER_TICK;
++ per_cpu(processed_system_time, cpu) += stolen * NS_PER_TICK;
++ account_steal_time(NULL, (cputime_t)stolen);
++ }
++
++ /*
++ * Account blocked ticks.
++ * HACK: Passing idle_task to account_steal_time()
++ * ensures that the ticks are accounted as idle/wait.
++ */
++ if ((blocked > 0) && (delta_cpu > 0)) {
++ delta_cpu -= blocked;
++ if (unlikely(delta_cpu < 0))
++ blocked += delta_cpu; /* clamp local-time progress */
++ do_div(blocked, NS_PER_TICK);
++ per_cpu(processed_blocked_time, cpu) += blocked * NS_PER_TICK;
++ per_cpu(processed_system_time, cpu) += blocked * NS_PER_TICK;
++ account_steal_time(idle_task(cpu), (cputime_t)blocked);
++ }
++
++ /* Account user/system ticks. */
++ if (delta_cpu > 0) {
++ do_div(delta_cpu, NS_PER_TICK);
++ per_cpu(processed_system_time, cpu) += delta_cpu * NS_PER_TICK;
++ if (user_mode_vm(regs))
++ account_user_time(current, (cputime_t)delta_cpu);
++ else
++ account_system_time(current, HARDIRQ_OFFSET,
++ (cputime_t)delta_cpu);
++ }
++
++ /* Offlined for more than a few seconds? Avoid lockup warnings. */
++ if (stolen > 5*HZ)
++ touch_softlockup_watchdog();
++
++ /* Local timer processing (see update_process_times()). */
++ run_local_timers();
++ if (rcu_pending(cpu))
++ rcu_check_callbacks(cpu, user_mode_vm(regs));
++ scheduler_tick();
++ run_posix_cpu_timers(current);
++ profile_tick(CPU_PROFILING, regs);
++
++ return IRQ_HANDLED;
++}
++
++static void init_missing_ticks_accounting(unsigned int cpu)
++{
++ struct vcpu_register_runstate_memory_area area;
++ struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
++ int rc;
++
++ memset(runstate, 0, sizeof(*runstate));
++
++ area.addr.v = runstate;
++ rc = HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu, &area);
++ WARN_ON(rc && rc != -ENOSYS);
++
++ per_cpu(processed_blocked_time, cpu) =
++ runstate->time[RUNSTATE_blocked];
++ per_cpu(processed_stolen_time, cpu) =
++ runstate->time[RUNSTATE_runnable] +
++ runstate->time[RUNSTATE_offline];
++}
++
++/* not static: needed by APM */
++unsigned long get_cmos_time(void)
++{
++ unsigned long retval;
++ unsigned long flags;
++
++ spin_lock_irqsave(&rtc_lock, flags);
++
++ if (efi_enabled)
++ retval = efi_get_time();
++ else
++ retval = mach_get_cmos_time();
++
++ spin_unlock_irqrestore(&rtc_lock, flags);
++
++ return retval;
++}
++EXPORT_SYMBOL(get_cmos_time);
++
++static void sync_cmos_clock(unsigned long dummy);
++
++static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
++
++static void sync_cmos_clock(unsigned long dummy)
++{
++ struct timeval now, next;
++ int fail = 1;
++
++ /*
++ * If we have an externally synchronized Linux clock, then update
++ * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
++ * called as close as possible to 500 ms before the new second starts.
++ * This code is run on a timer. If the clock is set, that timer
++ * may not expire at the correct time. Thus, we adjust...
++ */
++ if (!ntp_synced())
++ /*
++ * Not synced, exit, do not restart a timer (if one is
++ * running, let it run out).
++ */
++ return;
++
++ do_gettimeofday(&now);
++ if (now.tv_usec >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
++ now.tv_usec <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2)
++ fail = set_rtc_mmss(now.tv_sec);
++
++ next.tv_usec = USEC_AFTER - now.tv_usec;
++ if (next.tv_usec <= 0)
++ next.tv_usec += USEC_PER_SEC;
++
++ if (!fail)
++ next.tv_sec = 659;
++ else
++ next.tv_sec = 0;
++
++ if (next.tv_usec >= USEC_PER_SEC) {
++ next.tv_sec++;
++ next.tv_usec -= USEC_PER_SEC;
++ }
++ mod_timer(&sync_cmos_timer, jiffies + timeval_to_jiffies(&next));
++}
++
++void notify_arch_cmos_timer(void)
++{
++ mod_timer(&sync_cmos_timer, jiffies + 1);
++ mod_timer(&sync_xen_wallclock_timer, jiffies + 1);
++}
++
++static int timer_resume(struct sys_device *dev)
++{
++ extern void time_resume(void);
++ time_resume();
++ return 0;
++}
++
++static struct sysdev_class timer_sysclass = {
++ .resume = timer_resume,
++ set_kset_name("timer"),
++};
++
++
++/* XXX this driverfs stuff should probably go elsewhere later -john */
++static struct sys_device device_timer = {
++ .id = 0,
++ .cls = &timer_sysclass,
++};
++
++static int time_init_device(void)
++{
++ int error = sysdev_class_register(&timer_sysclass);
++ if (!error)
++ error = sysdev_register(&device_timer);
++ return error;
++}
++
++device_initcall(time_init_device);
++
++#ifdef CONFIG_HPET_TIMER
++extern void (*late_time_init)(void);
++/* Duplicate of time_init() below, with hpet_enable part added */
++static void __init hpet_time_init(void)
++{
++ xtime.tv_sec = get_cmos_time();
++ xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
++ set_normalized_timespec(&wall_to_monotonic,
++ -xtime.tv_sec, -xtime.tv_nsec);
++
++ if ((hpet_enable() >= 0) && hpet_use_timer) {
++ printk("Using HPET for base-timer\n");
++ }
++
++ time_init_hook();
++}
++#endif
++
++/* Dynamically-mapped IRQ. */
++DEFINE_PER_CPU(int, timer_irq);
++
++extern void (*late_time_init)(void);
++static void setup_cpu0_timer_irq(void)
++{
++ per_cpu(timer_irq, 0) =
++ bind_virq_to_irqhandler(
++ VIRQ_TIMER,
++ 0,
++ timer_interrupt,
++ SA_INTERRUPT,
++ "timer0",
++ NULL);
++ BUG_ON(per_cpu(timer_irq, 0) < 0);
++}
++
++static struct vcpu_set_periodic_timer xen_set_periodic_tick = {
++ .period_ns = NS_PER_TICK
++};
++
++void __init time_init(void)
++{
++#ifdef CONFIG_HPET_TIMER
++ if (is_hpet_capable()) {
++ /*
++ * HPET initialization needs to do memory-mapped io. So, let
++ * us do a late initialization after mem_init().
++ */
++ late_time_init = hpet_time_init;
++ return;
++ }
++#endif
++
++ switch (HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, 0,
++ &xen_set_periodic_tick)) {
++ case 0:
++#if CONFIG_XEN_COMPAT <= 0x030004
++ case -ENOSYS:
++#endif
++ break;
++ default:
++ BUG();
++ }
++
++ get_time_values_from_xen(0);
++
++ processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
++ per_cpu(processed_system_time, 0) = processed_system_time;
++ init_missing_ticks_accounting(0);
++
++ update_wallclock();
++
++ init_cpu_khz();
++ printk(KERN_INFO "Xen reported: %u.%03u MHz processor.\n",
++ cpu_khz / 1000, cpu_khz % 1000);
++
++#if defined(__x86_64__)
++ vxtime.mode = VXTIME_TSC;
++ vxtime.quot = (1000000L << 32) / vxtime_hz;
++ vxtime.tsc_quot = (1000L << 32) / cpu_khz;
++ sync_core();
++ rdtscll(vxtime.last_tsc);
++#endif
++
++ /* Cannot request_irq() until kmem is initialised. */
++ late_time_init = setup_cpu0_timer_irq;
++}
++
++/* Convert jiffies to system time. */
++u64 jiffies_to_st(unsigned long j)
++{
++ unsigned long seq;
++ long delta;
++ u64 st;
++
++ do {
++ seq = read_seqbegin(&xtime_lock);
++ delta = j - jiffies;
++ if (delta < 1) {
++ /* Triggers in some wrap-around cases, but that's okay:
++ * we just end up with a shorter timeout. */
++ st = processed_system_time + NS_PER_TICK;
++ } else if (((unsigned long)delta >> (BITS_PER_LONG-3)) != 0) {
++ /* Very long timeout means there is no pending timer.
++ * We indicate this to Xen by passing zero timeout. */
++ st = 0;
++ } else {
++ st = processed_system_time + delta * (u64)NS_PER_TICK;
++ }
++ } while (read_seqretry(&xtime_lock, seq));
++
++ return st;
++}
++EXPORT_SYMBOL(jiffies_to_st);
++
++/*
++ * stop_hz_timer / start_hz_timer - enter/exit 'tickless mode' on an idle cpu
++ * These functions are based on implementations from arch/s390/kernel/time.c
++ */
++static void stop_hz_timer(void)
++{
++ struct vcpu_set_singleshot_timer singleshot;
++ unsigned int cpu = smp_processor_id();
++ unsigned long j;
++ int rc;
++
++ cpu_set(cpu, nohz_cpu_mask);
++
++	/*
++	 * See matching smp_mb in rcu_start_batch in rcupdate.c.  These mbs
++	 * ensure that if __rcu_pending (nested in rcu_needs_cpu) fetches a
++	 * value of rcp->cur that matches rdp->quiescbatch and allows us to
++	 * stop the hz timer, then the cpumasks created for subsequent values
++	 * of cur in rcu_start_batch are guaranteed to pick up the updated
++	 * nohz_cpu_mask and so will not depend on this cpu.
++	 */
++
++ smp_mb();
++
++ /* Leave ourselves in tick mode if rcu or softirq or timer pending. */
++ if (rcu_needs_cpu(cpu) || local_softirq_pending() ||
++ (j = next_timer_interrupt(), time_before_eq(j, jiffies))) {
++ cpu_clear(cpu, nohz_cpu_mask);
++ j = jiffies + 1;
++ }
++
++ singleshot.timeout_abs_ns = jiffies_to_st(j);
++ singleshot.flags = 0;
++ rc = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &singleshot);
++#if CONFIG_XEN_COMPAT <= 0x030004
++ if (rc) {
++ BUG_ON(rc != -ENOSYS);
++ rc = HYPERVISOR_set_timer_op(singleshot.timeout_abs_ns);
++ }
++#endif
++ BUG_ON(rc);
++}
++
++static void start_hz_timer(void)
++{
++ cpu_clear(smp_processor_id(), nohz_cpu_mask);
++}
++
++void raw_safe_halt(void)
++{
++ stop_hz_timer();
++ /* Blocking includes an implicit local_irq_enable(). */
++ HYPERVISOR_block();
++ start_hz_timer();
++}
++EXPORT_SYMBOL(raw_safe_halt);
++
++void halt(void)
++{
++ if (irqs_disabled())
++ VOID(HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL));
++}
++EXPORT_SYMBOL(halt);
++
++/* No locking required. Interrupts are disabled on all CPUs. */
++void time_resume(void)
++{
++ unsigned int cpu;
++
++ init_cpu_khz();
++
++ for_each_online_cpu(cpu) {
++ switch (HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, cpu,
++ &xen_set_periodic_tick)) {
++ case 0:
++#if CONFIG_XEN_COMPAT <= 0x030004
++ case -ENOSYS:
++#endif
++ break;
++ default:
++ BUG();
++ }
++ get_time_values_from_xen(cpu);
++ per_cpu(processed_system_time, cpu) =
++ per_cpu(shadow_time, 0).system_timestamp;
++ init_missing_ticks_accounting(cpu);
++ }
++
++ processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
++
++ update_wallclock();
++}
++
++#ifdef CONFIG_SMP
++static char timer_name[NR_CPUS][15];
++
++int __cpuinit local_setup_timer(unsigned int cpu)
++{
++ int seq, irq;
++
++ BUG_ON(cpu == 0);
++
++ switch (HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, cpu,
++ &xen_set_periodic_tick)) {
++ case 0:
++#if CONFIG_XEN_COMPAT <= 0x030004
++ case -ENOSYS:
++#endif
++ break;
++ default:
++ BUG();
++ }
++
++ do {
++ seq = read_seqbegin(&xtime_lock);
++ /* Use cpu0 timestamp: cpu's shadow is not initialised yet. */
++ per_cpu(processed_system_time, cpu) =
++ per_cpu(shadow_time, 0).system_timestamp;
++ init_missing_ticks_accounting(cpu);
++ } while (read_seqretry(&xtime_lock, seq));
++
++ sprintf(timer_name[cpu], "timer%u", cpu);
++ irq = bind_virq_to_irqhandler(VIRQ_TIMER,
++ cpu,
++ timer_interrupt,
++ SA_INTERRUPT,
++ timer_name[cpu],
++ NULL);
++ if (irq < 0)
++ return irq;
++ per_cpu(timer_irq, cpu) = irq;
++
++ return 0;
++}
++
++void __cpuexit local_teardown_timer(unsigned int cpu)
++{
++ BUG_ON(cpu == 0);
++ unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
++}
++#endif
++
++#ifdef CONFIG_CPU_FREQ
++static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
++ void *data)
++{
++ struct cpufreq_freqs *freq = data;
++ struct xen_platform_op op;
++
++ if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
++ return 0;
++
++ if (val == CPUFREQ_PRECHANGE)
++ return 0;
++
++ op.cmd = XENPF_change_freq;
++ op.u.change_freq.flags = 0;
++ op.u.change_freq.cpu = freq->cpu;
++ op.u.change_freq.freq = (u64)freq->new * 1000;
++ WARN_ON(HYPERVISOR_platform_op(&op));
++
++ return 0;
++}
++
++static struct notifier_block time_cpufreq_notifier_block = {
++ .notifier_call = time_cpufreq_notifier
++};
++
++static int __init cpufreq_time_setup(void)
++{
++	if (cpufreq_register_notifier(&time_cpufreq_notifier_block,
++ CPUFREQ_TRANSITION_NOTIFIER)) {
++ printk(KERN_ERR "failed to set up cpufreq notifier\n");
++ return -ENODEV;
++ }
++ return 0;
++}
++
++core_initcall(cpufreq_time_setup);
++#endif
++
++/*
++ * /proc/sys/xen: This really belongs in another file. It can stay here for
++ * now however.
++ */
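++/*
++ * These appear as /proc/sys/xen/independent_wallclock and
++ * /proc/sys/xen/permitted_clock_jitter (mode 0644, so writable by root).
++ */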
++static ctl_table xen_subtable[] = {
++ {
++ .ctl_name = 1,
++ .procname = "independent_wallclock",
++ .data = &independent_wallclock,
++ .maxlen = sizeof(independent_wallclock),
++ .mode = 0644,
++ .proc_handler = proc_dointvec
++ },
++ {
++ .ctl_name = 2,
++ .procname = "permitted_clock_jitter",
++ .data = &permitted_clock_jitter,
++ .maxlen = sizeof(permitted_clock_jitter),
++ .mode = 0644,
++ .proc_handler = proc_doulongvec_minmax
++ },
++ { 0 }
++};
++static ctl_table xen_table[] = {
++ {
++ .ctl_name = 123,
++ .procname = "xen",
++ .mode = 0555,
++ .child = xen_subtable},
++ { 0 }
++};
++static int __init xen_sysctl_init(void)
++{
++ (void)register_sysctl_table(xen_table, 0);
++ return 0;
++}
++__initcall(xen_sysctl_init);
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/traps-xen.c linux-2.6.18-xen-3.2.0/arch/i386/kernel/traps-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/traps-xen.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/traps-xen.c 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,1190 @@
++/*
++ * linux/arch/i386/traps.c
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ *
++ * Pentium III FXSR, SSE support
++ * Gareth Hughes <gareth@valinux.com>, May 2000
++ */
++
++/*
++ * 'Traps.c' handles hardware traps and faults after we have saved some
++ * state in 'asm.s'.
++ */
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/timer.h>
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/interrupt.h>
++#include <linux/highmem.h>
++#include <linux/kallsyms.h>
++#include <linux/ptrace.h>
++#include <linux/utsname.h>
++#include <linux/kprobes.h>
++#include <linux/kexec.h>
++#include <linux/unwind.h>
++
++#ifdef CONFIG_EISA
++#include <linux/ioport.h>
++#include <linux/eisa.h>
++#endif
++
++#ifdef CONFIG_MCA
++#include <linux/mca.h>
++#endif
++
++#include <asm/processor.h>
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/io.h>
++#include <asm/atomic.h>
++#include <asm/debugreg.h>
++#include <asm/desc.h>
++#include <asm/i387.h>
++#include <asm/nmi.h>
++#include <asm/unwind.h>
++#include <asm/smp.h>
++#include <asm/arch_hooks.h>
++#include <asm/kdebug.h>
++
++#include <linux/module.h>
++
++#include "mach_traps.h"
++
++asmlinkage int system_call(void);
++
++struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
++ { 0, 0 }, { 0, 0 } };
++
++/* Do we ignore FPU interrupts ? */
++char ignore_fpu_irq = 0;
++
++#ifndef CONFIG_X86_NO_IDT
++/*
++ * The IDT has to be page-aligned to simplify the Pentium
++ * F0 0F bug workaround.. We have a special link segment
++ * for this.
++ */
++struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
++#endif
++
++asmlinkage void divide_error(void);
++asmlinkage void debug(void);
++asmlinkage void nmi(void);
++asmlinkage void int3(void);
++asmlinkage void overflow(void);
++asmlinkage void bounds(void);
++asmlinkage void invalid_op(void);
++asmlinkage void device_not_available(void);
++asmlinkage void coprocessor_segment_overrun(void);
++asmlinkage void invalid_TSS(void);
++asmlinkage void segment_not_present(void);
++asmlinkage void stack_segment(void);
++asmlinkage void general_protection(void);
++asmlinkage void page_fault(void);
++asmlinkage void coprocessor_error(void);
++asmlinkage void simd_coprocessor_error(void);
++asmlinkage void alignment_check(void);
++#ifndef CONFIG_XEN
++asmlinkage void spurious_interrupt_bug(void);
++#else
++asmlinkage void fixup_4gb_segment(void);
++#endif
++asmlinkage void machine_check(void);
++
++static int kstack_depth_to_print = 24;
++#ifdef CONFIG_STACK_UNWIND
++static int call_trace = 1;
++#else
++#define call_trace (-1)
++#endif
++ATOMIC_NOTIFIER_HEAD(i386die_chain);
++
++int register_die_notifier(struct notifier_block *nb)
++{
++ vmalloc_sync_all();
++ return atomic_notifier_chain_register(&i386die_chain, nb);
++}
++EXPORT_SYMBOL(register_die_notifier); /* used modular by kdb */
++
++int unregister_die_notifier(struct notifier_block *nb)
++{
++ return atomic_notifier_chain_unregister(&i386die_chain, nb);
++}
++EXPORT_SYMBOL(unregister_die_notifier); /* used modular by kdb */
++
++static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
++{
++ return p > (void *)tinfo &&
++ p < (void *)tinfo + THREAD_SIZE - 3;
++}
++
++/*
++ * Print one address/symbol entry per line.
++ */
++static inline void print_addr_and_symbol(unsigned long addr, char *log_lvl)
++{
++ printk(" [<%08lx>] ", addr);
++
++ print_symbol("%s\n", addr);
++}
++
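++/*
++ * Walk a kernel stack: follow the frame-pointer chain when
++ * CONFIG_FRAME_POINTER is enabled, otherwise scan the raw stack and print
++ * anything that looks like a kernel text address.
++ */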
++static inline unsigned long print_context_stack(struct thread_info *tinfo,
++ unsigned long *stack, unsigned long ebp,
++ char *log_lvl)
++{
++ unsigned long addr;
++
++#ifdef CONFIG_FRAME_POINTER
++ while (valid_stack_ptr(tinfo, (void *)ebp)) {
++ addr = *(unsigned long *)(ebp + 4);
++ print_addr_and_symbol(addr, log_lvl);
++ /*
++ * break out of recursive entries (such as
++ * end_of_stack_stop_unwind_function):
++ */
++ if (ebp == *(unsigned long *)ebp)
++ break;
++ ebp = *(unsigned long *)ebp;
++ }
++#else
++ while (valid_stack_ptr(tinfo, stack)) {
++ addr = *stack++;
++ if (__kernel_text_address(addr))
++ print_addr_and_symbol(addr, log_lvl);
++ }
++#endif
++ return ebp;
++}
++
++static asmlinkage int
++show_trace_unwind(struct unwind_frame_info *info, void *log_lvl)
++{
++ int n = 0;
++
++ while (unwind(info) == 0 && UNW_PC(info)) {
++ n++;
++ print_addr_and_symbol(UNW_PC(info), log_lvl);
++ if (arch_unw_user_mode(info))
++ break;
++ }
++ return n;
++}
++
++static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
++ unsigned long *stack, char *log_lvl)
++{
++ unsigned long ebp;
++
++ if (!task)
++ task = current;
++
++ if (call_trace >= 0) {
++ int unw_ret = 0;
++ struct unwind_frame_info info;
++
++ if (regs) {
++ if (unwind_init_frame_info(&info, task, regs) == 0)
++ unw_ret = show_trace_unwind(&info, log_lvl);
++ } else if (task == current)
++ unw_ret = unwind_init_running(&info, show_trace_unwind, log_lvl);
++ else {
++ if (unwind_init_blocked(&info, task) == 0)
++ unw_ret = show_trace_unwind(&info, log_lvl);
++ }
++ if (unw_ret > 0) {
++ if (call_trace == 1 && !arch_unw_user_mode(&info)) {
++ print_symbol("DWARF2 unwinder stuck at %s\n",
++ UNW_PC(&info));
++ if (UNW_SP(&info) >= PAGE_OFFSET) {
++ printk("Leftover inexact backtrace:\n");
++ stack = (void *)UNW_SP(&info);
++ } else
++ printk("Full inexact backtrace again:\n");
++ } else if (call_trace >= 1)
++ return;
++ else
++ printk("Full inexact backtrace again:\n");
++ } else
++ printk("Inexact backtrace:\n");
++ }
++
++ if (task == current) {
++ /* Grab ebp right from our regs */
++ asm ("movl %%ebp, %0" : "=r" (ebp) : );
++ } else {
++ /* ebp is the last reg pushed by switch_to */
++ ebp = *(unsigned long *) task->thread.esp;
++ }
++
++ while (1) {
++ struct thread_info *context;
++ context = (struct thread_info *)
++ ((unsigned long)stack & (~(THREAD_SIZE - 1)));
++ ebp = print_context_stack(context, stack, ebp, log_lvl);
++ stack = (unsigned long*)context->previous_esp;
++ if (!stack)
++ break;
++ printk("%s =======================\n", log_lvl);
++ }
++}
++
++void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long * stack)
++{
++ show_trace_log_lvl(task, regs, stack, "");
++}
++
++static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
++ unsigned long *esp, char *log_lvl)
++{
++ unsigned long *stack;
++ int i;
++
++ if (esp == NULL) {
++ if (task)
++ esp = (unsigned long*)task->thread.esp;
++ else
++ esp = (unsigned long *)&esp;
++ }
++
++ stack = esp;
++ for(i = 0; i < kstack_depth_to_print; i++) {
++ if (kstack_end(stack))
++ break;
++ if (i && ((i % 8) == 0))
++ printk("\n%s ", log_lvl);
++ printk("%08lx ", *stack++);
++ }
++ printk("\n%sCall Trace:\n", log_lvl);
++ show_trace_log_lvl(task, regs, esp, log_lvl);
++}
++
++void show_stack(struct task_struct *task, unsigned long *esp)
++{
++ printk(" ");
++ show_stack_log_lvl(task, NULL, esp, "");
++}
++
++/*
++ * The architecture-independent dump_stack generator
++ */
++void dump_stack(void)
++{
++ unsigned long stack;
++
++ show_trace(current, NULL, &stack);
++}
++
++EXPORT_SYMBOL(dump_stack);
++
++void show_registers(struct pt_regs *regs)
++{
++ int i;
++ int in_kernel = 1;
++ unsigned long esp;
++ unsigned short ss;
++
++ esp = (unsigned long) (&regs->esp);
++ savesegment(ss, ss);
++ if (user_mode_vm(regs)) {
++ in_kernel = 0;
++ esp = regs->esp;
++ ss = regs->xss & 0xffff;
++ }
++ print_modules();
++ printk(KERN_EMERG "CPU: %d\nEIP: %04x:[<%08lx>] %s VLI\n"
++ "EFLAGS: %08lx (%s %.*s) \n",
++ smp_processor_id(), 0xffff & regs->xcs, regs->eip,
++ print_tainted(), regs->eflags, system_utsname.release,
++ (int)strcspn(system_utsname.version, " "),
++ system_utsname.version);
++ print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
++ printk(KERN_EMERG "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
++ regs->eax, regs->ebx, regs->ecx, regs->edx);
++ printk(KERN_EMERG "esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
++ regs->esi, regs->edi, regs->ebp, esp);
++ printk(KERN_EMERG "ds: %04x es: %04x ss: %04x\n",
++ regs->xds & 0xffff, regs->xes & 0xffff, ss);
++ printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
++ TASK_COMM_LEN, current->comm, current->pid,
++ current_thread_info(), current, current->thread_info);
++ /*
++ * When in-kernel, we also print out the stack and code at the
++ * time of the fault..
++ */
++ if (in_kernel) {
++ u8 __user *eip;
++
++ printk("\n" KERN_EMERG "Stack: ");
++ show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG);
++
++ printk(KERN_EMERG "Code: ");
++
++ eip = (u8 __user *)regs->eip - 43;
++ for (i = 0; i < 64; i++, eip++) {
++ unsigned char c;
++
++ if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) {
++ printk(" Bad EIP value.");
++ break;
++ }
++ if (eip == (u8 __user *)regs->eip)
++ printk("<%02x> ", c);
++ else
++ printk("%02x ", c);
++ }
++ }
++ printk("\n");
++}
++
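++/*
++ * Decode a BUG(): BUG() plants a ud2 instruction (0x0f 0x0b) which, with
++ * CONFIG_DEBUG_BUGVERBOSE, is followed by the line number and a pointer to
++ * the file name.
++ */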
++static void handle_BUG(struct pt_regs *regs)
++{
++ unsigned long eip = regs->eip;
++ unsigned short ud2;
++
++ if (eip < PAGE_OFFSET)
++ return;
++ if (__get_user(ud2, (unsigned short __user *)eip))
++ return;
++ if (ud2 != 0x0b0f)
++ return;
++
++ printk(KERN_EMERG "------------[ cut here ]------------\n");
++
++#ifdef CONFIG_DEBUG_BUGVERBOSE
++ do {
++ unsigned short line;
++ char *file;
++ char c;
++
++ if (__get_user(line, (unsigned short __user *)(eip + 2)))
++ break;
++ if (__get_user(file, (char * __user *)(eip + 4)) ||
++ (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
++ file = "<bad filename>";
++
++ printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
++ return;
++ } while (0);
++#endif
++ printk(KERN_EMERG "Kernel BUG at [verbose debug info unavailable]\n");
++}
++
++/*
++ * This path is taken when something in the kernel has done something bad
++ * and is about to be terminated.
++ */
++void die(const char * str, struct pt_regs * regs, long err)
++{
++ static struct {
++ spinlock_t lock;
++ u32 lock_owner;
++ int lock_owner_depth;
++ } die = {
++ .lock = SPIN_LOCK_UNLOCKED,
++ .lock_owner = -1,
++ .lock_owner_depth = 0
++ };
++ static int die_counter;
++ unsigned long flags;
++
++ oops_enter();
++
++ if (die.lock_owner != raw_smp_processor_id()) {
++ console_verbose();
++ spin_lock_irqsave(&die.lock, flags);
++ die.lock_owner = smp_processor_id();
++ die.lock_owner_depth = 0;
++ bust_spinlocks(1);
++ }
++ else
++ local_save_flags(flags);
++
++ if (++die.lock_owner_depth < 3) {
++ int nl = 0;
++ unsigned long esp;
++ unsigned short ss;
++
++ handle_BUG(regs);
++ printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
++#ifdef CONFIG_PREEMPT
++ printk(KERN_EMERG "PREEMPT ");
++ nl = 1;
++#endif
++#ifdef CONFIG_SMP
++ if (!nl)
++ printk(KERN_EMERG);
++ printk("SMP ");
++ nl = 1;
++#endif
++#ifdef CONFIG_DEBUG_PAGEALLOC
++ if (!nl)
++ printk(KERN_EMERG);
++ printk("DEBUG_PAGEALLOC");
++ nl = 1;
++#endif
++ if (nl)
++ printk("\n");
++ if (notify_die(DIE_OOPS, str, regs, err,
++ current->thread.trap_no, SIGSEGV) !=
++ NOTIFY_STOP) {
++ show_registers(regs);
++ /* Executive summary in case the oops scrolled away */
++ esp = (unsigned long) (&regs->esp);
++ savesegment(ss, ss);
++ if (user_mode(regs)) {
++ esp = regs->esp;
++ ss = regs->xss & 0xffff;
++ }
++ printk(KERN_EMERG "EIP: [<%08lx>] ", regs->eip);
++ print_symbol("%s", regs->eip);
++ printk(" SS:ESP %04x:%08lx\n", ss, esp);
++ }
++ else
++ regs = NULL;
++ } else
++ printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
++
++ bust_spinlocks(0);
++ die.lock_owner = -1;
++ spin_unlock_irqrestore(&die.lock, flags);
++
++ if (!regs)
++ return;
++
++ if (kexec_should_crash(current))
++ crash_kexec(regs);
++
++ if (in_interrupt())
++ panic("Fatal exception in interrupt");
++
++ if (panic_on_oops)
++ panic("Fatal exception");
++
++ oops_exit();
++ do_exit(SIGSEGV);
++}
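Editor's note: die() serializes oopses with a spinlock but lets the CPU that already owns the lock re-enter (a nested die() while printing an oops), and gives up once the recursion depth reaches 3. A small pthread sketch of that owner/depth pattern; the names are invented and, unlike the kernel code, the exit path only unlocks when the outermost level finishes:

#include <pthread.h>
#include <stdio.h>

/* A lock the owning thread may re-take a limited number of times,
 * mirroring die.lock / lock_owner / lock_owner_depth above. */
static struct {
	pthread_mutex_t lock;
	pthread_t owner;
	int owner_valid;
	int depth;
} die_lock = { PTHREAD_MUTEX_INITIALIZER };

static int die_enter(void)
{
	/* Unlocked owner check, as in die(): only the owner can match. */
	if (!die_lock.owner_valid ||
	    !pthread_equal(die_lock.owner, pthread_self())) {
		pthread_mutex_lock(&die_lock.lock);
		die_lock.owner = pthread_self();
		die_lock.owner_valid = 1;
		die_lock.depth = 0;
	}
	if (++die_lock.depth >= 3) {
		printf("Recursive die() failure, output suppressed\n");
		return -1;
	}
	return 0;
}

static void die_exit(void)
{
	if (--die_lock.depth == 0) {
		die_lock.owner_valid = 0;
		pthread_mutex_unlock(&die_lock.lock);
	}
}

int main(void)
{
	if (die_enter() == 0)
		printf("first-level oops output\n");
	die_exit();
	return 0;
}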
++
++static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
++{
++ if (!user_mode_vm(regs))
++ die(str, regs, err);
++}
++
++static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
++ struct pt_regs * regs, long error_code,
++ siginfo_t *info)
++{
++ struct task_struct *tsk = current;
++ tsk->thread.error_code = error_code;
++ tsk->thread.trap_no = trapnr;
++
++ if (regs->eflags & VM_MASK) {
++ if (vm86)
++ goto vm86_trap;
++ goto trap_signal;
++ }
++
++ if (!user_mode(regs))
++ goto kernel_trap;
++
++ trap_signal: {
++ if (info)
++ force_sig_info(signr, info, tsk);
++ else
++ force_sig(signr, tsk);
++ return;
++ }
++
++ kernel_trap: {
++ if (!fixup_exception(regs))
++ die(str, regs, error_code);
++ return;
++ }
++
++ vm86_trap: {
++ int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
++ if (ret) goto trap_signal;
++ return;
++ }
++}
++
++#define DO_ERROR(trapnr, signr, str, name) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++ == NOTIFY_STOP) \
++ return; \
++ do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
++}
++
++#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++ siginfo_t info; \
++ info.si_signo = signr; \
++ info.si_errno = 0; \
++ info.si_code = sicode; \
++ info.si_addr = (void __user *)siaddr; \
++ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++ == NOTIFY_STOP) \
++ return; \
++ do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
++}
++
++#define DO_VM86_ERROR(trapnr, signr, str, name) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++ == NOTIFY_STOP) \
++ return; \
++ do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
++}
++
++#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++ siginfo_t info; \
++ info.si_signo = signr; \
++ info.si_errno = 0; \
++ info.si_code = sicode; \
++ info.si_addr = (void __user *)siaddr; \
++ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++ == NOTIFY_STOP) \
++ return; \
++ do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
++}
++
++DO_VM86_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->eip)
++#ifndef CONFIG_KPROBES
++DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
++#endif
++DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
++DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
++DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip)
++DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
++DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
++DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
++DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
++DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
++DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
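Editor's note: each of the lines above stamps out a complete trap handler from the macros defined just before them. Expanding one by hand, DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip) generates roughly:

fastcall void do_invalid_op(struct pt_regs * regs, long error_code)
{
	siginfo_t info;
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPN;
	info.si_addr = (void __user *)regs->eip;
	if (notify_die(DIE_TRAP, "invalid opcode", regs, error_code, 6, SIGILL)
						== NOTIFY_STOP)
		return;
	do_trap(6, SIGILL, "invalid opcode", 0, regs, error_code, &info);
}

The DO_VM86_ERROR variants differ only in passing 1 for the vm86 argument, so vm86-mode traps are routed through handle_vm86_trap() first.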
++
++fastcall void __kprobes do_general_protection(struct pt_regs * regs,
++ long error_code)
++{
++ current->thread.error_code = error_code;
++ current->thread.trap_no = 13;
++
++ if (regs->eflags & VM_MASK)
++ goto gp_in_vm86;
++
++ if (!user_mode(regs))
++ goto gp_in_kernel;
++
++ current->thread.error_code = error_code;
++ current->thread.trap_no = 13;
++ force_sig(SIGSEGV, current);
++ return;
++
++gp_in_vm86:
++ local_irq_enable();
++ handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
++ return;
++
++gp_in_kernel:
++ if (!fixup_exception(regs)) {
++ if (notify_die(DIE_GPF, "general protection fault", regs,
++ error_code, 13, SIGSEGV) == NOTIFY_STOP)
++ return;
++ die("general protection fault", regs, error_code);
++ }
++}
++
++static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
++{
++ printk(KERN_EMERG "Uhhuh. NMI received. Dazed and confused, but trying "
++ "to continue\n");
++ printk(KERN_EMERG "You probably have a hardware problem with your RAM "
++ "chips\n");
++
++ /* Clear and disable the memory parity error line. */
++ clear_mem_error(reason);
++}
++
++static void io_check_error(unsigned char reason, struct pt_regs * regs)
++{
++ printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
++ show_registers(regs);
++
++ /* Re-enable the IOCK line, wait for a few seconds */
++ clear_io_check_error(reason);
++}
++
++static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
++{
++#ifdef CONFIG_MCA
++ /* Might actually be able to figure out what the guilty party
++ * is. */
++ if( MCA_bus ) {
++ mca_handle_nmi();
++ return;
++ }
++#endif
++ printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
++ reason, smp_processor_id());
++ printk("Dazed and confused, but trying to continue\n");
++ printk("Do you have a strange power saving mode enabled?\n");
++}
++
++static DEFINE_SPINLOCK(nmi_print_lock);
++
++void die_nmi (struct pt_regs *regs, const char *msg)
++{
++ if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) ==
++ NOTIFY_STOP)
++ return;
++
++ spin_lock(&nmi_print_lock);
++ /*
++ * We are in trouble anyway, let's at least try
++ * to get a message out.
++ */
++ bust_spinlocks(1);
++ printk(KERN_EMERG "%s", msg);
++ printk(" on CPU%d, eip %08lx, registers:\n",
++ smp_processor_id(), regs->eip);
++ show_registers(regs);
++ printk(KERN_EMERG "console shuts up ...\n");
++ console_silent();
++ spin_unlock(&nmi_print_lock);
++ bust_spinlocks(0);
++
++ /* If we are in kernel we are probably nested up pretty bad
++ * and might as well get out now while we still can.
++ */
++ if (!user_mode_vm(regs)) {
++ current->thread.trap_no = 2;
++ crash_kexec(regs);
++ }
++
++ do_exit(SIGSEGV);
++}
++
++static void default_do_nmi(struct pt_regs * regs)
++{
++ unsigned char reason = 0;
++
++ /* Only the BSP gets external NMIs from the system. */
++ if (!smp_processor_id())
++ reason = get_nmi_reason();
++
++ if (!(reason & 0xc0)) {
++ if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
++ == NOTIFY_STOP)
++ return;
++#ifdef CONFIG_X86_LOCAL_APIC
++ /*
++ * Ok, so this is none of the documented NMI sources,
++ * so it must be the NMI watchdog.
++ */
++ if (nmi_watchdog) {
++ nmi_watchdog_tick(regs);
++ return;
++ }
++#endif
++ unknown_nmi_error(reason, regs);
++ return;
++ }
++ if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
++ return;
++ if (reason & 0x80)
++ mem_parity_error(reason, regs);
++ if (reason & 0x40)
++ io_check_error(reason, regs);
++ /*
++ * Reassert NMI in case it became active meanwhile
++ * as it's edge-triggered.
++ */
++ reassert_nmi();
++}
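Editor's note: default_do_nmi() classifies an NMI by the reason byte read from system port 0x61: bit 7 (0x80) reports a memory parity/ECC error, bit 6 (0x40) an I/O check (IOCHK) error, and anything without those bits is either the local APIC watchdog or an unknown source. A hypothetical userspace decoder of that byte, mirroring the tests above:

#include <stdio.h>

/* Decode the NMI status bits the handler tests (reason & 0xc0, 0x80,
 * 0x40).  In the kernel the value comes from get_nmi_reason(); here it
 * is just a test constant. */
static void decode_nmi_reason(unsigned char reason)
{
	if (!(reason & 0xc0)) {
		printf("reason %02x: unknown NMI (or NMI watchdog)\n", reason);
		return;
	}
	if (reason & 0x80)
		printf("reason %02x: memory parity error\n", reason);
	if (reason & 0x40)
		printf("reason %02x: I/O check (IOCHK) error\n", reason);
}

int main(void)
{
	decode_nmi_reason(0x80);
	decode_nmi_reason(0x40);
	decode_nmi_reason(0x00);
	return 0;
}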
++
++static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
++{
++ return 0;
++}
++
++static nmi_callback_t nmi_callback = dummy_nmi_callback;
++
++fastcall void do_nmi(struct pt_regs * regs, long error_code)
++{
++ int cpu;
++
++ nmi_enter();
++
++ cpu = smp_processor_id();
++
++ ++nmi_count(cpu);
++
++ if (!rcu_dereference(nmi_callback)(regs, cpu))
++ default_do_nmi(regs);
++
++ nmi_exit();
++}
++
++void set_nmi_callback(nmi_callback_t callback)
++{
++ vmalloc_sync_all();
++ rcu_assign_pointer(nmi_callback, callback);
++}
++EXPORT_SYMBOL_GPL(set_nmi_callback);
++
++void unset_nmi_callback(void)
++{
++ nmi_callback = dummy_nmi_callback;
++}
++EXPORT_SYMBOL_GPL(unset_nmi_callback);
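Editor's note: set_nmi_callback() and do_nmi() publish and consume the callback pointer with rcu_assign_pointer()/rcu_dereference(), a release-style store paired with a dependency-ordered load, so an NMI never observes a half-initialized callback. A rough userspace analogue with C11 atomics; acquire stands in for RCU's weaker dependency ordering, and all names here are illustrative:

#include <stdatomic.h>
#include <stdio.h>

typedef int (*nmi_cb_t)(int cpu);

static int dummy_cb(int cpu) { (void)cpu; return 0; }

static _Atomic(nmi_cb_t) cb_ptr = dummy_cb;

/* Publisher: everything the callback depends on is initialized before
 * this release store, like rcu_assign_pointer(). */
static void set_cb(nmi_cb_t cb)
{
	atomic_store_explicit(&cb_ptr, cb, memory_order_release);
}

/* Consumer: the acquire load pairs with the release store, like
 * rcu_dereference(). */
static int run_cb(int cpu)
{
	nmi_cb_t cb = atomic_load_explicit(&cb_ptr, memory_order_acquire);
	return cb(cpu);
}

static int real_cb(int cpu) { printf("callback on cpu %d\n", cpu); return 1; }

int main(void)
{
	run_cb(0);		/* dummy callback */
	set_cb(real_cb);
	run_cb(0);		/* newly published callback */
	return 0;
}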
++
++#ifdef CONFIG_KPROBES
++fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
++{
++ if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
++ == NOTIFY_STOP)
++ return;
++ /* This is an interrupt gate, because kprobes wants interrupts
++ disabled. Normal trap handlers don't. */
++ restore_interrupts(regs);
++ do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
++}
++#endif
++
++/*
++ * Our handling of the processor debug registers is non-trivial.
++ * We do not clear them on entry and exit from the kernel. Therefore
++ * it is possible to get a watchpoint trap here from inside the kernel.
++ * However, the code in ./ptrace.c has ensured that the user can
++ * only set watchpoints on userspace addresses. Therefore the in-kernel
++ * watchpoint trap can only occur in code which is reading/writing
++ * from user space. Such code must not hold kernel locks (since it
++ * can equally take a page fault), therefore it is safe to call
++ * force_sig_info even though that claims and releases locks.
++ *
++ * Code in ./signal.c ensures that the debug control register
++ * is restored before we deliver any signal, and therefore that
++ * user code runs with the correct debug control register even though
++ * we clear it here.
++ *
++ * Being careful here means that we don't have to be as careful in a
++ * lot of more complicated places (task switching can be a bit lazy
++ * about restoring all the debug state, and ptrace doesn't have to
++ * find every occurrence of the TF bit that could be saved away even
++ * by user code)
++ */
++fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
++{
++ unsigned int condition;
++ struct task_struct *tsk = current;
++
++ get_debugreg(condition, 6);
++
++ if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
++ SIGTRAP) == NOTIFY_STOP)
++ return;
++ /* It's safe to allow irq's after DR6 has been saved */
++ if (regs->eflags & X86_EFLAGS_IF)
++ local_irq_enable();
++
++ /* Mask out spurious debug traps due to lazy DR7 setting */
++ if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
++ if (!tsk->thread.debugreg[7])
++ goto clear_dr7;
++ }
++
++ if (regs->eflags & VM_MASK)
++ goto debug_vm86;
++
++ /* Save debug status register where ptrace can see it */
++ tsk->thread.debugreg[6] = condition;
++
++ /*
++ * Single-stepping through TF: make sure we ignore any events in
++ * kernel space (but re-enable TF when returning to user mode).
++ */
++ if (condition & DR_STEP) {
++ /*
++ * We already checked v86 mode above, so we can
++ * check for kernel mode by just checking the CPL
++ * of CS.
++ */
++ if (!user_mode(regs))
++ goto clear_TF_reenable;
++ }
++
++ /* Ok, finally something we can handle */
++ send_sigtrap(tsk, regs, error_code);
++
++ /* Disable additional traps. They'll be re-enabled when
++ * the signal is delivered.
++ */
++clear_dr7:
++ set_debugreg(0, 7);
++ return;
++
++debug_vm86:
++ handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
++ return;
++
++clear_TF_reenable:
++ set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
++ regs->eflags &= ~TF_MASK;
++ return;
++}
++
++/*
++ * Note that we play around with the 'TS' bit in an attempt to get
++ * the correct behaviour even in the presence of the asynchronous
++ * IRQ13 behaviour
++ */
++void math_error(void __user *eip)
++{
++ struct task_struct * task;
++ siginfo_t info;
++ unsigned short cwd, swd;
++
++ /*
++ * Save the info for the exception handler and clear the error.
++ */
++ task = current;
++ save_init_fpu(task);
++ task->thread.trap_no = 16;
++ task->thread.error_code = 0;
++ info.si_signo = SIGFPE;
++ info.si_errno = 0;
++ info.si_code = __SI_FAULT;
++ info.si_addr = eip;
++ /*
++ * (~cwd & swd) will mask out exceptions that are not set to unmasked
++ * status. 0x3f is the exception bits in these regs, 0x200 is the
++ * C1 reg you need in case of a stack fault, 0x040 is the stack
++ * fault bit. We should only be taking one exception at a time,
++ * so if this combination doesn't produce any single exception,
++ * then we have a bad program that isn't synchronizing its FPU usage
++ * and it will suffer the consequences since we won't be able to
++ * fully reproduce the context of the exception
++ */
++ cwd = get_fpu_cwd(task);
++ swd = get_fpu_swd(task);
++ switch (swd & ~cwd & 0x3f) {
++ case 0x000: /* No unmasked exception */
++ return;
++ default: /* Multiple exceptions */
++ break;
++ case 0x001: /* Invalid Op */
++ /*
++ * swd & 0x240 == 0x040: Stack Underflow
++ * swd & 0x240 == 0x240: Stack Overflow
++ * User must clear the SF bit (0x40) if set
++ */
++ info.si_code = FPE_FLTINV;
++ break;
++ case 0x002: /* Denormalize */
++ case 0x010: /* Underflow */
++ info.si_code = FPE_FLTUND;
++ break;
++ case 0x004: /* Zero Divide */
++ info.si_code = FPE_FLTDIV;
++ break;
++ case 0x008: /* Overflow */
++ info.si_code = FPE_FLTOVF;
++ break;
++ case 0x020: /* Precision */
++ info.si_code = FPE_FLTRES;
++ break;
++ }
++ force_sig_info(SIGFPE, &info, task);
++}
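Editor's note: math_error() turns the x87 state into a signal code by masking the status-word exception flags (low 6 bits) with the inverted control-word mask bits, so only exceptions the program left unmasked are reported. The same decode, isolated into a standalone function that names the FPE_* codes in strings rather than depending on kernel headers:

#include <stdio.h>

/* Classify the unmasked x87 exception bits (swd & ~cwd & 0x3f),
 * mirroring the switch in math_error() above. */
static const char *fpu_exception(unsigned short cwd, unsigned short swd)
{
	switch (swd & ~cwd & 0x3f) {
	case 0x000: return "no unmasked exception";
	case 0x001: return "invalid op / stack fault (FPE_FLTINV)";
	case 0x002:
	case 0x010: return "denormal or underflow (FPE_FLTUND)";
	case 0x004: return "zero divide (FPE_FLTDIV)";
	case 0x008: return "overflow (FPE_FLTOVF)";
	case 0x020: return "precision (FPE_FLTRES)";
	default:    return "multiple exceptions";
	}
}

int main(void)
{
	/* control word 0x037b: only the zero-divide exception unmasked;
	 * status word 0x0004: zero-divide flag set */
	printf("%s\n", fpu_exception(0x037b, 0x0004));
	return 0;
}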
++
++fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
++{
++ ignore_fpu_irq = 1;
++ math_error((void __user *)regs->eip);
++}
++
++static void simd_math_error(void __user *eip)
++{
++ struct task_struct * task;
++ siginfo_t info;
++ unsigned short mxcsr;
++
++ /*
++ * Save the info for the exception handler and clear the error.
++ */
++ task = current;
++ save_init_fpu(task);
++ task->thread.trap_no = 19;
++ task->thread.error_code = 0;
++ info.si_signo = SIGFPE;
++ info.si_errno = 0;
++ info.si_code = __SI_FAULT;
++ info.si_addr = eip;
++ /*
++ * The SIMD FPU exceptions are handled a little differently, as there
++ * is only a single status/control register. Thus, to determine which
++ * unmasked exception was caught we must mask the exception mask bits
++ * at 0x1f80, and then use these to mask the exception bits at 0x3f.
++ */
++ mxcsr = get_fpu_mxcsr(task);
++ switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
++ case 0x000:
++ default:
++ break;
++ case 0x001: /* Invalid Op */
++ info.si_code = FPE_FLTINV;
++ break;
++ case 0x002: /* Denormalize */
++ case 0x010: /* Underflow */
++ info.si_code = FPE_FLTUND;
++ break;
++ case 0x004: /* Zero Divide */
++ info.si_code = FPE_FLTDIV;
++ break;
++ case 0x008: /* Overflow */
++ info.si_code = FPE_FLTOVF;
++ break;
++ case 0x020: /* Precision */
++ info.si_code = FPE_FLTRES;
++ break;
++ }
++ force_sig_info(SIGFPE, &info, task);
++}
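Editor's note: simd_math_error() applies the same idea to the single MXCSR register: bits 7-12 (0x1f80) are the exception masks and bits 0-5 (0x3f) the sticky exception flags, so shifting the mask field down by 7, inverting it and ANDing with the flags isolates the unmasked exceptions. A quick standalone check of that expression:

#include <stdio.h>

/* Unmasked SSE exceptions: flag bits (0x3f) whose mask bit
 * (0x1f80 >> 7) is clear, as in simd_math_error(). */
static unsigned int mxcsr_unmasked(unsigned int mxcsr)
{
	return ~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f);
}

int main(void)
{
	/* default MXCSR 0x1f80 with the zero-divide mask (bit 9)
	 * cleared and the zero-divide flag (bit 2) set */
	unsigned int mxcsr = (0x1f80 & ~0x0200) | 0x0004;

	printf("unmasked = %#x\n", mxcsr_unmasked(mxcsr));	/* prints 0x4 */
	return 0;
}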
++
++fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
++ long error_code)
++{
++ if (cpu_has_xmm) {
++ /* Handle SIMD FPU exceptions on PIII+ processors. */
++ ignore_fpu_irq = 1;
++ simd_math_error((void __user *)regs->eip);
++ } else {
++ /*
++ * Handle strange cache flush from user space exception
++ * in all other cases. This is undocumented behaviour.
++ */
++ if (regs->eflags & VM_MASK) {
++ handle_vm86_fault((struct kernel_vm86_regs *)regs,
++ error_code);
++ return;
++ }
++ current->thread.trap_no = 19;
++ current->thread.error_code = error_code;
++ die_if_kernel("cache flush denied", regs, error_code);
++ force_sig(SIGSEGV, current);
++ }
++}
++
++#ifndef CONFIG_XEN
++fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
++ long error_code)
++{
++#if 0
++ /* No need to warn about this any longer. */
++ printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
++#endif
++}
++
++fastcall void setup_x86_bogus_stack(unsigned char * stk)
++{
++ unsigned long *switch16_ptr, *switch32_ptr;
++ struct pt_regs *regs;
++ unsigned long stack_top, stack_bot;
++ unsigned short iret_frame16_off;
++ int cpu = smp_processor_id();
++ /* reserve the space on 32bit stack for the magic switch16 pointer */
++ memmove(stk, stk + 8, sizeof(struct pt_regs));
++ switch16_ptr = (unsigned long *)(stk + sizeof(struct pt_regs));
++ regs = (struct pt_regs *)stk;
++ /* now the switch32 on 16bit stack */
++ stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
++ stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
++ switch32_ptr = (unsigned long *)(stack_top - 8);
++ iret_frame16_off = CPU_16BIT_STACK_SIZE - 8 - 20;
++ /* copy iret frame on 16bit stack */
++ memcpy((void *)(stack_bot + iret_frame16_off), &regs->eip, 20);
++ /* fill in the switch pointers */
++ switch16_ptr[0] = (regs->esp & 0xffff0000) | iret_frame16_off;
++ switch16_ptr[1] = __ESPFIX_SS;
++ switch32_ptr[0] = (unsigned long)stk + sizeof(struct pt_regs) +
++ 8 - CPU_16BIT_STACK_SIZE;
++ switch32_ptr[1] = __KERNEL_DS;
++}
++
++fastcall unsigned char * fixup_x86_bogus_stack(unsigned short sp)
++{
++ unsigned long *switch32_ptr;
++ unsigned char *stack16, *stack32;
++ unsigned long stack_top, stack_bot;
++ int len;
++ int cpu = smp_processor_id();
++ stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
++ stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
++ switch32_ptr = (unsigned long *)(stack_top - 8);
++ /* copy the data from 16bit stack to 32bit stack */
++ len = CPU_16BIT_STACK_SIZE - 8 - sp;
++ stack16 = (unsigned char *)(stack_bot + sp);
++ stack32 = (unsigned char *)
++ (switch32_ptr[0] + CPU_16BIT_STACK_SIZE - 8 - len);
++ memcpy(stack32, stack16, len);
++ return stack32;
++}
++#endif
++
++/*
++ * 'math_state_restore()' saves the current math information in the
++ * old math state array, and gets the new ones from the current task
++ *
++ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
++ * Don't touch unless you *really* know how it works.
++ *
++ * Must be called with kernel preemption disabled (in this case,
++ * local interrupts are disabled at the call-site in entry.S).
++ */
++asmlinkage void math_state_restore(struct pt_regs regs)
++{
++ struct thread_info *thread = current_thread_info();
++ struct task_struct *tsk = thread->task;
++
++ /* NB. 'clts' is done for us by Xen during virtual trap. */
++ if (!tsk_used_math(tsk))
++ init_fpu(tsk);
++ restore_fpu(tsk);
++ thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */
++}
++
++#ifndef CONFIG_MATH_EMULATION
++
++asmlinkage void math_emulate(long arg)
++{
++ printk(KERN_EMERG "math-emulation not enabled and no coprocessor found.\n");
++ printk(KERN_EMERG "killing %s.\n",current->comm);
++ force_sig(SIGFPE,current);
++ schedule();
++}
++
++#endif /* CONFIG_MATH_EMULATION */
++
++#ifdef CONFIG_X86_F00F_BUG
++void __init trap_init_f00f_bug(void)
++{
++ __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
++
++ /*
++ * Update the IDT descriptor and reload the IDT so that
++ * it uses the read-only mapped virtual address.
++ */
++ idt_descr.address = fix_to_virt(FIX_F00F_IDT);
++ load_idt(&idt_descr);
++}
++#endif
++
++
++/*
++ * NB. All these are "trap gates" (i.e. events_mask isn't set) except
++ * for those that specify <dpl>|4 in the second field.
++ */
++static trap_info_t trap_table[] = {
++ { 0, 0, __KERNEL_CS, (unsigned long)divide_error },
++ { 1, 0|4, __KERNEL_CS, (unsigned long)debug },
++ { 3, 3|4, __KERNEL_CS, (unsigned long)int3 },
++ { 4, 3, __KERNEL_CS, (unsigned long)overflow },
++ { 5, 0, __KERNEL_CS, (unsigned long)bounds },
++ { 6, 0, __KERNEL_CS, (unsigned long)invalid_op },
++ { 7, 0|4, __KERNEL_CS, (unsigned long)device_not_available },
++ { 9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
++ { 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS },
++ { 11, 0, __KERNEL_CS, (unsigned long)segment_not_present },
++ { 12, 0, __KERNEL_CS, (unsigned long)stack_segment },
++ { 13, 0, __KERNEL_CS, (unsigned long)general_protection },
++ { 14, 0|4, __KERNEL_CS, (unsigned long)page_fault },
++ { 15, 0, __KERNEL_CS, (unsigned long)fixup_4gb_segment },
++ { 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error },
++ { 17, 0, __KERNEL_CS, (unsigned long)alignment_check },
++#ifdef CONFIG_X86_MCE
++ { 18, 0, __KERNEL_CS, (unsigned long)machine_check },
++#endif
++ { 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error },
++ { SYSCALL_VECTOR, 3, __KERNEL_CS, (unsigned long)system_call },
++ { 0, 0, 0, 0 }
++};
++
++void __init trap_init(void)
++{
++ int ret;
++
++ ret = HYPERVISOR_set_trap_table(trap_table);
++ if (ret)
++ printk("HYPERVISOR_set_trap_table failed: error %d\n", ret);
++
++ if (cpu_has_fxsr) {
++ /*
++ * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
++ * Generates a compile-time "error: zero width for bit-field" if
++ * the alignment is wrong.
++ */
++ struct fxsrAlignAssert {
++ int _:!(offsetof(struct task_struct,
++ thread.i387.fxsave) & 15);
++ };
++
++ printk(KERN_INFO "Enabling fast FPU save and restore... ");
++ set_in_cr4(X86_CR4_OSFXSR);
++ printk("done.\n");
++ }
++ if (cpu_has_xmm) {
++ printk(KERN_INFO "Enabling unmasked SIMD FPU exception "
++ "support... ");
++ set_in_cr4(X86_CR4_OSXMMEXCPT);
++ printk("done.\n");
++ }
++
++ /*
++ * Should be a barrier for any external CPU state.
++ */
++ cpu_init();
++}
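Editor's note: the fxsrAlignAssert struct inside trap_init() is a compile-time assertion. The bit-field width is the negation of the misalignment test, so a violated assumption yields a zero-width named bit-field, which the compiler rejects. The trick generalizes into a hypothetical macro like the one below; modern code would simply use _Static_assert or the kernel's BUILD_BUG_ON:

#include <stddef.h>

/* Fails to compile ("zero width for bit-field") when cond is false,
 * exactly like the fxsrAlignAssert struct above.  The macro name is
 * illustrative, not a kernel API. */
#define COMPILE_TIME_CHECK(cond, name) \
	struct compile_time_check_##name { int _ : !!(cond); }

struct demo {
	char pad[3];
	long value;
};

/* Holds: the compiler pads `value` to the alignment of long. */
COMPILE_TIME_CHECK(offsetof(struct demo, value) % _Alignof(long) == 0,
		   demo_value_aligned);

int main(void) { return 0; }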
++
++void smp_trap_init(trap_info_t *trap_ctxt)
++{
++ const trap_info_t *t = trap_table;
++
++ for (t = trap_table; t->address; t++) {
++ trap_ctxt[t->vector].flags = t->flags;
++ trap_ctxt[t->vector].cs = t->cs;
++ trap_ctxt[t->vector].address = t->address;
++ }
++}
++
++static int __init kstack_setup(char *s)
++{
++ kstack_depth_to_print = simple_strtoul(s, NULL, 0);
++ return 1;
++}
++__setup("kstack=", kstack_setup);
++
++#ifdef CONFIG_STACK_UNWIND
++static int __init call_trace_setup(char *s)
++{
++ if (strcmp(s, "old") == 0)
++ call_trace = -1;
++ else if (strcmp(s, "both") == 0)
++ call_trace = 0;
++ else if (strcmp(s, "newfallback") == 0)
++ call_trace = 1;
++ else if (strcmp(s, "new") == 0)
++ call_trace = 2;
++ return 1;
++}
++__setup("call_trace=", call_trace_setup);
++#endif
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/traps.c linux-2.6.18-xen-3.2.0/arch/i386/kernel/traps.c
+--- linux-2.6.18.8/arch/i386/kernel/traps.c 2007-02-23 15:52:30.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/traps.c 2008-02-15 16:21:49.000000000 -0800
+@@ -642,18 +642,11 @@ static void mem_parity_error(unsigned ch
+
+ static void io_check_error(unsigned char reason, struct pt_regs * regs)
+ {
+- unsigned long i;
+-
+ printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
+ show_registers(regs);
+
+ /* Re-enable the IOCK line, wait for a few seconds */
+- reason = (reason & 0xf) | 8;
+- outb(reason, 0x61);
+- i = 2000;
+- while (--i) udelay(1000);
+- reason &= ~8;
+- outb(reason, 0x61);
++ clear_io_check_error(reason);
+ }
+
+ static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/vm86.c linux-2.6.18-xen-3.2.0/arch/i386/kernel/vm86.c
+--- linux-2.6.18.8/arch/i386/kernel/vm86.c 2007-02-23 15:52:30.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/vm86.c 2008-02-15 16:21:49.000000000 -0800
+@@ -97,7 +97,9 @@
+ struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
+ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
+ {
++#ifndef CONFIG_X86_NO_TSS
+ struct tss_struct *tss;
++#endif
+ struct pt_regs *ret;
+ unsigned long tmp;
+
+@@ -122,12 +124,16 @@ struct pt_regs * fastcall save_v86_state
+ do_exit(SIGSEGV);
+ }
+
++#ifndef CONFIG_X86_NO_TSS
+ tss = &per_cpu(init_tss, get_cpu());
++#endif
+ current->thread.esp0 = current->thread.saved_esp0;
+ current->thread.sysenter_cs = __KERNEL_CS;
+ load_esp0(tss, &current->thread);
+ current->thread.saved_esp0 = 0;
++#ifndef CONFIG_X86_NO_TSS
+ put_cpu();
++#endif
+
+ loadsegment(fs, current->thread.saved_fs);
+ loadsegment(gs, current->thread.saved_gs);
+@@ -251,7 +257,9 @@ out:
+
+ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
+ {
++#ifndef CONFIG_X86_NO_TSS
+ struct tss_struct *tss;
++#endif
+ long eax;
+ /*
+ * make sure the vm86() system call doesn't try to do anything silly
+@@ -296,12 +304,16 @@ static void do_sys_vm86(struct kernel_vm
+ savesegment(fs, tsk->thread.saved_fs);
+ savesegment(gs, tsk->thread.saved_gs);
+
++#ifndef CONFIG_X86_NO_TSS
+ tss = &per_cpu(init_tss, get_cpu());
++#endif
+ tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
+ if (cpu_has_sep)
+ tsk->thread.sysenter_cs = 0;
+ load_esp0(tss, &tsk->thread);
++#ifndef CONFIG_X86_NO_TSS
+ put_cpu();
++#endif
+
+ tsk->thread.screen_bitmap = info->screen_bitmap;
+ if (info->flags & VM86_SCREEN_BITMAP)
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/vmlinux.lds.S linux-2.6.18-xen-3.2.0/arch/i386/kernel/vmlinux.lds.S
+--- linux-2.6.18.8/arch/i386/kernel/vmlinux.lds.S 2007-02-23 15:52:30.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/vmlinux.lds.S 2008-02-15 16:21:49.000000000 -0800
+@@ -13,6 +13,12 @@ OUTPUT_FORMAT("elf32-i386", "elf32-i386"
+ OUTPUT_ARCH(i386)
+ ENTRY(phys_startup_32)
+ jiffies = jiffies_64;
++
++PHDRS {
++ text PT_LOAD FLAGS(5); /* R_E */
++ data PT_LOAD FLAGS(7); /* RWE */
++ note PT_NOTE FLAGS(4); /* R__ */
++}
+ SECTIONS
+ {
+ . = __KERNEL_START;
+@@ -26,7 +32,7 @@ SECTIONS
+ KPROBES_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+- } = 0x9090
++ } :text = 0x9090
+
+ _etext = .; /* End of text section */
+
+@@ -45,10 +51,11 @@ SECTIONS
+ __tracedata_end = .;
+
+ /* writeable */
++ . = ALIGN(4096);
+ .data : AT(ADDR(.data) - LOAD_OFFSET) { /* Data */
+ *(.data)
+ CONSTRUCTORS
+- }
++ } :data
+
+ . = ALIGN(4096);
+ __nosave_begin = .;
+@@ -184,4 +191,6 @@ SECTIONS
+ STABS_DEBUG
+
+ DWARF_DEBUG
++
++ NOTES
+ }
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/vsyscall-note-xen.S linux-2.6.18-xen-3.2.0/arch/i386/kernel/vsyscall-note-xen.S
+--- linux-2.6.18.8/arch/i386/kernel/vsyscall-note-xen.S 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/kernel/vsyscall-note-xen.S 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,32 @@
++/*
++ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
++ * Here we can supply some information useful to userland.
++ * First we get the vanilla i386 note that supplies the kernel version info.
++ */
++
++#include "vsyscall-note.S"
++
++/*
++ * Now we add a special note telling glibc's dynamic linker a fake hardware
++ * flavor that it will use to choose the search path for libraries in the
++ * same way it uses real hardware capabilities like "mmx".
++ * We supply "nosegneg" as the fake capability, to indicate that we
++ * do not like negative offsets in instructions using segment overrides,
++ * since we implement those inefficiently. This makes it possible to
++ * install libraries optimized to avoid those access patterns in someplace
++ * like /lib/i686/tls/nosegneg. Note that an /etc/ld.so.conf.d/file
++ * corresponding to the bits here is needed to make ldconfig work right.
++ * It should contain:
++ * hwcap 0 nosegneg
++ * to match the mapping of bit to name that we give here.
++ */
++#define NOTE_KERNELCAP_BEGIN(ncaps, mask) \
++ ASM_ELF_NOTE_BEGIN(".note.kernelcap", "a", "GNU", 2) \
++ .long ncaps, mask
++#define NOTE_KERNELCAP(bit, name) \
++ .byte bit; .asciz name
++#define NOTE_KERNELCAP_END ASM_ELF_NOTE_END
++
++NOTE_KERNELCAP_BEGIN(1, 1)
++NOTE_KERNELCAP(0, "nosegneg")
++NOTE_KERNELCAP_END
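Editor's note: the NOTE_KERNELCAP_* macros above emit a standard ELF note: a note header (name size, descriptor size, type), the 4-byte name "GNU", then the descriptor, here a capability count, a mask word and one bit/name pair; `readelf -n` on the built vDSO would display it. A hedged C view of that payload; the struct and field names are descriptive only, and a real note section pads each piece to 4-byte boundaries:

#include <elf.h>
#include <stdint.h>
#include <stdio.h>

/* Rough shape of the .note.kernelcap payload built by the macros above. */
struct kernelcap_desc {
	uint32_t ncaps;		/* NOTE_KERNELCAP_BEGIN(1, 1) */
	uint32_t mask;
	uint8_t  bit;		/* NOTE_KERNELCAP(0, "nosegneg") */
	char     name[9];	/* "nosegneg" + NUL */
};

int main(void)
{
	Elf32_Nhdr hdr = {
		.n_namesz = 4,				/* "GNU" + NUL */
		.n_descsz = sizeof(struct kernelcap_desc),
		.n_type   = 2,
	};
	struct kernelcap_desc desc = { 1, 1, 0, "nosegneg" };

	printf("note type %u, %u-byte desc, cap bit %u -> \"%s\"\n",
	       (unsigned)hdr.n_type, (unsigned)hdr.n_descsz,
	       (unsigned)desc.bit, desc.name);
	return 0;
}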
+diff -rpuN linux-2.6.18.8/arch/i386/lib/Makefile linux-2.6.18-xen-3.2.0/arch/i386/lib/Makefile
+--- linux-2.6.18.8/arch/i386/lib/Makefile 2007-02-23 15:52:30.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/lib/Makefile 2008-02-15 16:21:49.000000000 -0800
+@@ -7,3 +7,4 @@ lib-y = checksum.o delay.o usercopy.o ge
+ bitops.o
+
+ lib-$(CONFIG_X86_USE_3DNOW) += mmx.o
++lib-$(CONFIG_XEN_SCRUB_PAGES) += scrub.o
+diff -rpuN linux-2.6.18.8/arch/i386/lib/scrub.c linux-2.6.18-xen-3.2.0/arch/i386/lib/scrub.c
+--- linux-2.6.18.8/arch/i386/lib/scrub.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/lib/scrub.c 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,21 @@
++#include <asm/cpufeature.h>
++#include <asm/page.h>
++#include <asm/processor.h>
++
++void scrub_pages(void *v, unsigned int count)
++{
++ if (likely(cpu_has_xmm2)) {
++ unsigned long n = count * (PAGE_SIZE / sizeof(long) / 4);
++
++ for (; n--; v += sizeof(long) * 4)
++ asm("movnti %1,(%0)\n\t"
++ "movnti %1,%c2(%0)\n\t"
++ "movnti %1,2*%c2(%0)\n\t"
++ "movnti %1,3*%c2(%0)\n\t"
++ : : "r" (v), "r" (0L), "i" (sizeof(long))
++ : "memory");
++ asm volatile("sfence" : : : "memory");
++ } else
++ for (; count--; v += PAGE_SIZE)
++ clear_page(v);
++}
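Editor's note: scrub_pages() zeroes freed pages with movnti non-temporal stores so scrubbing does not evict useful cache lines, then issues sfence to make the weakly-ordered stores globally visible before the pages are handed back. A userspace sketch of the same pattern with SSE2 intrinsics; the buffer size and function name are arbitrary:

#include <emmintrin.h>	/* SSE2: _mm_stream_si128, _mm_sfence */
#include <stdlib.h>
#include <stdio.h>

/* Zero `bytes` (16-byte aligned, multiple of 16) with non-temporal
 * stores, then fence, mirroring the movnti/sfence pair above. */
static void scrub_buffer(void *v, size_t bytes)
{
	__m128i zero = _mm_setzero_si128();
	char *p = v;

	for (size_t off = 0; off < bytes; off += 16)
		_mm_stream_si128((__m128i *)(p + off), zero);
	_mm_sfence();
}

int main(void)
{
	size_t sz = 4096;
	void *buf = aligned_alloc(16, sz);

	if (!buf)
		return 1;
	scrub_buffer(buf, sz);
	printf("first byte after scrub: %d\n", ((char *)buf)[0]);
	free(buf);
	return 0;
}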
+diff -rpuN linux-2.6.18.8/arch/i386/mach-xen/Makefile linux-2.6.18-xen-3.2.0/arch/i386/mach-xen/Makefile
+--- linux-2.6.18.8/arch/i386/mach-xen/Makefile 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/mach-xen/Makefile 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,5 @@
++#
++# Makefile for the linux kernel.
++#
++
++obj-y := setup.o
+diff -rpuN linux-2.6.18.8/arch/i386/mach-xen/setup.c linux-2.6.18-xen-3.2.0/arch/i386/mach-xen/setup.c
+--- linux-2.6.18.8/arch/i386/mach-xen/setup.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/mach-xen/setup.c 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,147 @@
++/*
++ * Machine specific setup for generic
++ */
++
++#include <linux/smp.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/module.h>
++#include <asm/acpi.h>
++#include <asm/arch_hooks.h>
++#include <asm/e820.h>
++#include <asm/setup.h>
++#include <asm/fixmap.h>
++
++#include <xen/interface/callback.h>
++#include <xen/interface/memory.h>
++
++#ifdef CONFIG_HOTPLUG_CPU
++#define DEFAULT_SEND_IPI (1)
++#else
++#define DEFAULT_SEND_IPI (0)
++#endif
++
++int no_broadcast=DEFAULT_SEND_IPI;
++
++static __init int no_ipi_broadcast(char *str)
++{
++ get_option(&str, &no_broadcast);
++ printk ("Using %s mode\n", no_broadcast ? "No IPI Broadcast" :
++ "IPI Broadcast");
++ return 1;
++}
++
++__setup("no_ipi_broadcast", no_ipi_broadcast);
++
++static int __init print_ipi_mode(void)
++{
++ printk ("Using IPI %s mode\n", no_broadcast ? "No-Shortcut" :
++ "Shortcut");
++ return 0;
++}
++
++late_initcall(print_ipi_mode);
++
++/**
++ * machine_specific_memory_setup - Hook for machine specific memory setup.
++ *
++ * Description:
++ * This is included late in kernel/setup.c so that it can make
++ * use of all of the static functions.
++ **/
++
++char * __init machine_specific_memory_setup(void)
++{
++ int rc;
++ struct xen_memory_map memmap;
++ /*
++ * This is rather large for a stack variable but this early in
++ * the boot process we know we have plenty slack space.
++ */
++ struct e820entry map[E820MAX];
++
++ memmap.nr_entries = E820MAX;
++ set_xen_guest_handle(memmap.buffer, map);
++
++ rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
++ if ( rc == -ENOSYS ) {
++ memmap.nr_entries = 1;
++ map[0].addr = 0ULL;
++ map[0].size = PFN_PHYS((unsigned long long)xen_start_info->nr_pages);
++ /* 8MB slack (to balance backend allocations). */
++ map[0].size += 8ULL << 20;
++ map[0].type = E820_RAM;
++ rc = 0;
++ }
++ BUG_ON(rc);
++
++ sanitize_e820_map(map, (char *)&memmap.nr_entries);
++
++ BUG_ON(copy_e820_map(map, (char)memmap.nr_entries) < 0);
++
++ return "Xen";
++}
++
++
++extern void hypervisor_callback(void);
++extern void failsafe_callback(void);
++extern void nmi(void);
++
++unsigned long *machine_to_phys_mapping;
++EXPORT_SYMBOL(machine_to_phys_mapping);
++unsigned int machine_to_phys_order;
++EXPORT_SYMBOL(machine_to_phys_order);
++
++void __init machine_specific_arch_setup(void)
++{
++ int ret;
++ struct xen_machphys_mapping mapping;
++ unsigned long machine_to_phys_nr_ents;
++ struct xen_platform_parameters pp;
++ static struct callback_register __initdata event = {
++ .type = CALLBACKTYPE_event,
++ .address = { __KERNEL_CS, (unsigned long)hypervisor_callback },
++ };
++ static struct callback_register __initdata failsafe = {
++ .type = CALLBACKTYPE_failsafe,
++ .address = { __KERNEL_CS, (unsigned long)failsafe_callback },
++ };
++ static struct callback_register __initdata nmi_cb = {
++ .type = CALLBACKTYPE_nmi,
++ .address = { __KERNEL_CS, (unsigned long)nmi },
++ };
++
++ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
++ if (ret == 0)
++ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (ret == -ENOSYS)
++ ret = HYPERVISOR_set_callbacks(
++ event.address.cs, event.address.eip,
++ failsafe.address.cs, failsafe.address.eip);
++#endif
++ BUG_ON(ret);
++
++ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &nmi_cb);
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (ret == -ENOSYS) {
++ static struct xennmi_callback __initdata cb = {
++ .handler_address = (unsigned long)nmi
++ };
++
++ HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
++ }
++#endif
++
++ if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
++ set_fixaddr_top(pp.virt_start);
++
++ machine_to_phys_mapping = (unsigned long *)MACH2PHYS_VIRT_START;
++ machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
++ if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
++ machine_to_phys_mapping = (unsigned long *)mapping.v_start;
++ machine_to_phys_nr_ents = mapping.max_mfn + 1;
++ }
++ while ((1UL << machine_to_phys_order) < machine_to_phys_nr_ents )
++ machine_to_phys_order++;
++}
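Editor's note: the loop at the end of machine_specific_arch_setup() just computes the smallest power-of-two order that covers machine_to_phys_nr_ents, i.e. a ceiling log2. Isolated, with a couple of test values:

#include <stdio.h>

/* Smallest `order` with (1UL << order) >= n, as in the loop above. */
static unsigned int ceil_log2(unsigned long n)
{
	unsigned int order = 0;

	while ((1UL << order) < n)
		order++;
	return order;
}

int main(void)
{
	printf("%u %u %u\n", ceil_log2(1), ceil_log2(1024), ceil_log2(1025));
	/* prints: 0 10 11 */
	return 0;
}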
+diff -rpuN linux-2.6.18.8/arch/i386/mm/Makefile linux-2.6.18-xen-3.2.0/arch/i386/mm/Makefile
+--- linux-2.6.18.8/arch/i386/mm/Makefile 2007-02-23 15:52:30.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/mm/Makefile 2008-02-15 16:21:49.000000000 -0800
+@@ -8,3 +8,4 @@ obj-$(CONFIG_NUMA) += discontig.o
+ obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+ obj-$(CONFIG_HIGHMEM) += highmem.o
+ obj-$(CONFIG_BOOT_IOREMAP) += boot_ioremap.o
++obj-$(CONFIG_XEN) += hypervisor.o
+diff -rpuN linux-2.6.18.8/arch/i386/mm/fault-xen.c linux-2.6.18-xen-3.2.0/arch/i386/mm/fault-xen.c
+--- linux-2.6.18.8/arch/i386/mm/fault-xen.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/mm/fault-xen.c 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,779 @@
++/*
++ * linux/arch/i386/mm/fault.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ */
++
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ptrace.h>
++#include <linux/mman.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/tty.h>
++#include <linux/vt_kern.h> /* For unblank_screen() */
++#include <linux/highmem.h>
++#include <linux/module.h>
++#include <linux/kprobes.h>
++
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/desc.h>
++#include <asm/kdebug.h>
++
++extern void die(const char *,struct pt_regs *,long);
++
++#ifdef CONFIG_KPROBES
++ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
++int register_page_fault_notifier(struct notifier_block *nb)
++{
++ vmalloc_sync_all();
++ return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
++}
++
++int unregister_page_fault_notifier(struct notifier_block *nb)
++{
++ return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
++}
++
++static inline int notify_page_fault(enum die_val val, const char *str,
++ struct pt_regs *regs, long err, int trap, int sig)
++{
++ struct die_args args = {
++ .regs = regs,
++ .str = str,
++ .err = err,
++ .trapnr = trap,
++ .signr = sig
++ };
++ return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
++}
++#else
++static inline int notify_page_fault(enum die_val val, const char *str,
++ struct pt_regs *regs, long err, int trap, int sig)
++{
++ return NOTIFY_DONE;
++}
++#endif
++
++
++/*
++ * Unlock any spinlocks which will prevent us from getting the
++ * message out
++ */
++void bust_spinlocks(int yes)
++{
++ int loglevel_save = console_loglevel;
++
++ if (yes) {
++ oops_in_progress = 1;
++ return;
++ }
++#ifdef CONFIG_VT
++ unblank_screen();
++#endif
++ oops_in_progress = 0;
++ /*
++ * OK, the message is on the console. Now we call printk()
++ * without oops_in_progress set so that printk will give klogd
++ * a poke. Hold onto your hats...
++ */
++ console_loglevel = 15; /* NMI oopser may have shut the console up */
++ printk(" ");
++ console_loglevel = loglevel_save;
++}
++
++/*
++ * Return EIP plus the CS segment base. The segment limit is also
++ * adjusted, clamped to the kernel/user address space (whichever is
++ * appropriate), and returned in *eip_limit.
++ *
++ * The segment is checked, because it might have been changed by another
++ * task between the original faulting instruction and here.
++ *
++ * If CS is no longer a valid code segment, or if EIP is beyond the
++ * limit, or if it is a kernel address when CS is not a kernel segment,
++ * then the returned value will be greater than *eip_limit.
++ *
++ * This is slow, but is very rarely executed.
++ */
++static inline unsigned long get_segment_eip(struct pt_regs *regs,
++ unsigned long *eip_limit)
++{
++ unsigned long eip = regs->eip;
++ unsigned seg = regs->xcs & 0xffff;
++ u32 seg_ar, seg_limit, base, *desc;
++
++ /* Unlikely, but must come before segment checks. */
++ if (unlikely(regs->eflags & VM_MASK)) {
++ base = seg << 4;
++ *eip_limit = base + 0xffff;
++ return base + (eip & 0xffff);
++ }
++
++ /* The standard kernel/user address space limit. */
++ *eip_limit = (seg & 2) ? USER_DS.seg : KERNEL_DS.seg;
++
++ /* By far the most common cases. */
++ if (likely(seg == __USER_CS || seg == GET_KERNEL_CS()))
++ return eip;
++
++ /* Check the segment exists, is within the current LDT/GDT size,
++ that kernel/user (ring 0..3) has the appropriate privilege,
++ that it's a code segment, and get the limit. */
++ __asm__ ("larl %3,%0; lsll %3,%1"
++ : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
++ if ((~seg_ar & 0x9800) || eip > seg_limit) {
++ *eip_limit = 0;
++ return 1; /* So that returned eip > *eip_limit. */
++ }
++
++ /* Get the GDT/LDT descriptor base.
++ When you look for races in this code remember that
++ LDT and other horrors are only used in user space. */
++ if (seg & (1<<2)) {
++ /* Must lock the LDT while reading it. */
++ down(&current->mm->context.sem);
++ desc = current->mm->context.ldt;
++ desc = (void *)desc + (seg & ~7);
++ } else {
++ /* Must disable preemption while reading the GDT. */
++ desc = (u32 *)get_cpu_gdt_table(get_cpu());
++ desc = (void *)desc + (seg & ~7);
++ }
++
++ /* Decode the code segment base from the descriptor */
++ base = get_desc_base((unsigned long *)desc);
++
++ if (seg & (1<<2)) {
++ up(&current->mm->context.sem);
++ } else
++ put_cpu();
++
++ /* Adjust EIP and segment limit, and clamp at the kernel limit.
++ It's legitimate for segments to wrap at 0xffffffff. */
++ seg_limit += base;
++ if (seg_limit < *eip_limit && seg_limit >= base)
++ *eip_limit = seg_limit;
++ return eip + base;
++}
++
++/*
++ * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
++ * Check that here and ignore it.
++ */
++static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
++{
++ unsigned long limit;
++ unsigned long instr = get_segment_eip (regs, &limit);
++ int scan_more = 1;
++ int prefetch = 0;
++ int i;
++
++ for (i = 0; scan_more && i < 15; i++) {
++ unsigned char opcode;
++ unsigned char instr_hi;
++ unsigned char instr_lo;
++
++ if (instr > limit)
++ break;
++ if (__get_user(opcode, (unsigned char __user *) instr))
++ break;
++
++ instr_hi = opcode & 0xf0;
++ instr_lo = opcode & 0x0f;
++ instr++;
++
++ switch (instr_hi) {
++ case 0x20:
++ case 0x30:
++ /* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
++ scan_more = ((instr_lo & 7) == 0x6);
++ break;
++
++ case 0x60:
++ /* 0x64 thru 0x67 are valid prefixes in all modes. */
++ scan_more = (instr_lo & 0xC) == 0x4;
++ break;
++ case 0xF0:
++ /* 0xF0, 0xF2, and 0xF3 are valid prefixes */
++ scan_more = !instr_lo || (instr_lo>>1) == 1;
++ break;
++ case 0x00:
++ /* Prefetch instruction is 0x0F0D or 0x0F18 */
++ scan_more = 0;
++ if (instr > limit)
++ break;
++ if (__get_user(opcode, (unsigned char __user *) instr))
++ break;
++ prefetch = (instr_lo == 0xF) &&
++ (opcode == 0x0D || opcode == 0x18);
++ break;
++ default:
++ scan_more = 0;
++ break;
++ }
++ }
++ return prefetch;
++}
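Editor's note: __is_prefetch() decodes at most 15 bytes at the faulting EIP, skipping the legal segment/address-size/REP/LOCK prefixes, and reports a prefetch only for the two-byte opcodes 0x0F 0x0D (3DNow! PREFETCH) and 0x0F 0x18 (PREFETCHh). The same scan over a plain byte buffer, as a userspace sketch that omits the segment-limit and __get_user checks the kernel needs:

#include <stdio.h>

/* Return 1 if the instruction starting at buf[0..len) is a prefetch,
 * using the same prefix/opcode rules as __is_prefetch() above. */
static int is_prefetch_insn(const unsigned char *buf, int len)
{
	int scan_more = 1;

	for (int i = 0; scan_more && i < 15 && i < len; i++) {
		unsigned char op = buf[i];
		unsigned char hi = op & 0xf0, lo = op & 0x0f;

		switch (hi) {
		case 0x20: case 0x30:	/* 0x26/0x2E/0x36/0x3E segment prefixes */
			scan_more = ((lo & 7) == 0x6);
			break;
		case 0x60:		/* 0x64..0x67 prefixes */
			scan_more = (lo & 0xC) == 0x4;
			break;
		case 0xF0:		/* 0xF0, 0xF2, 0xF3 prefixes */
			scan_more = !lo || (lo >> 1) == 1;
			break;
		case 0x00:		/* 0x0F 0x0D or 0x0F 0x18 */
			if (lo == 0xF && i + 1 < len)
				return buf[i + 1] == 0x0D || buf[i + 1] == 0x18;
			return 0;
		default:
			return 0;
		}
	}
	return 0;
}

int main(void)
{
	unsigned char prefetchnta[] = { 0x0F, 0x18, 0x00 };	/* prefetchnta (%eax) */
	unsigned char mov[]        = { 0x89, 0xD8 };		/* mov %ebx,%eax */

	printf("%d %d\n", is_prefetch_insn(prefetchnta, 3),
	       is_prefetch_insn(mov, 2));			/* prints: 1 0 */
	return 0;
}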
++
++static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
++ unsigned long error_code)
++{
++ if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
++ boot_cpu_data.x86 >= 6)) {
++ /* Catch an obscure case of prefetch inside an NX page. */
++ if (nx_enabled && (error_code & 16))
++ return 0;
++ return __is_prefetch(regs, addr);
++ }
++ return 0;
++}
++
++static noinline void force_sig_info_fault(int si_signo, int si_code,
++ unsigned long address, struct task_struct *tsk)
++{
++ siginfo_t info;
++
++ info.si_signo = si_signo;
++ info.si_errno = 0;
++ info.si_code = si_code;
++ info.si_addr = (void __user *)address;
++ force_sig_info(si_signo, &info, tsk);
++}
++
++fastcall void do_invalid_op(struct pt_regs *, unsigned long);
++
++#ifdef CONFIG_X86_PAE
++static void dump_fault_path(unsigned long address)
++{
++ unsigned long *p, page;
++ unsigned long mfn;
++
++ page = read_cr3();
++ p = (unsigned long *)__va(page);
++ p += (address >> 30) * 2;
++ printk(KERN_ALERT "%08lx -> *pde = %08lx:%08lx\n", page, p[1], p[0]);
++ if (p[0] & _PAGE_PRESENT) {
++ mfn = (p[0] >> PAGE_SHIFT) | (p[1] << 20);
++ page = mfn_to_pfn(mfn) << PAGE_SHIFT;
++ p = (unsigned long *)__va(page);
++ address &= 0x3fffffff;
++ p += (address >> 21) * 2;
++ printk(KERN_ALERT "%08lx -> *pme = %08lx:%08lx\n",
++ page, p[1], p[0]);
++ mfn = (p[0] >> PAGE_SHIFT) | (p[1] << 20);
++#ifdef CONFIG_HIGHPTE
++ if (mfn_to_pfn(mfn) >= highstart_pfn)
++ return;
++#endif
++ if (p[0] & _PAGE_PRESENT) {
++ page = mfn_to_pfn(mfn) << PAGE_SHIFT;
++ p = (unsigned long *) __va(page);
++ address &= 0x001fffff;
++ p += (address >> 12) * 2;
++ printk(KERN_ALERT "%08lx -> *pte = %08lx:%08lx\n",
++ page, p[1], p[0]);
++ }
++ }
++}
++#else
++static void dump_fault_path(unsigned long address)
++{
++ unsigned long page;
++
++ page = read_cr3();
++ page = ((unsigned long *) __va(page))[address >> 22];
++ if (oops_may_print())
++ printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
++ machine_to_phys(page));
++ /*
++ * We must not directly access the pte in the highpte
++ * case if the page table is located in highmem.
++ * And let's rather not kmap-atomic the pte, just in case
++ * it's allocated already.
++ */
++#ifdef CONFIG_HIGHPTE
++ if ((page >> PAGE_SHIFT) >= highstart_pfn)
++ return;
++#endif
++ if ((page & 1) && oops_may_print()) {
++ page &= PAGE_MASK;
++ address &= 0x003ff000;
++ page = machine_to_phys(page);
++ page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
++ printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page,
++ machine_to_phys(page));
++ }
++}
++#endif
++
++static int spurious_fault(struct pt_regs *regs,
++ unsigned long address,
++ unsigned long error_code)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++ /* Reserved-bit violation or user access to kernel space? */
++ if (error_code & 0x0c)
++ return 0;
++
++ pgd = init_mm.pgd + pgd_index(address);
++ if (!pgd_present(*pgd))
++ return 0;
++
++ pud = pud_offset(pgd, address);
++ if (!pud_present(*pud))
++ return 0;
++
++ pmd = pmd_offset(pud, address);
++ if (!pmd_present(*pmd))
++ return 0;
++
++ pte = pte_offset_kernel(pmd, address);
++ if (!pte_present(*pte))
++ return 0;
++ if ((error_code & 0x02) && !pte_write(*pte))
++ return 0;
++#ifdef CONFIG_X86_PAE
++ if ((error_code & 0x10) && (__pte_val(*pte) & _PAGE_NX))
++ return 0;
++#endif
++
++ return 1;
++}
++
++static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
++{
++ unsigned index = pgd_index(address);
++ pgd_t *pgd_k;
++ pud_t *pud, *pud_k;
++ pmd_t *pmd, *pmd_k;
++
++ pgd += index;
++ pgd_k = init_mm.pgd + index;
++
++ if (!pgd_present(*pgd_k))
++ return NULL;
++
++ /*
++ * set_pgd(pgd, *pgd_k); here would be useless on PAE
++ * and redundant with the set_pmd() on non-PAE. As would
++ * set_pud.
++ */
++
++ pud = pud_offset(pgd, address);
++ pud_k = pud_offset(pgd_k, address);
++ if (!pud_present(*pud_k))
++ return NULL;
++
++ pmd = pmd_offset(pud, address);
++ pmd_k = pmd_offset(pud_k, address);
++ if (!pmd_present(*pmd_k))
++ return NULL;
++ if (!pmd_present(*pmd))
++#if CONFIG_XEN_COMPAT > 0x030002
++ set_pmd(pmd, *pmd_k);
++#else
++ /*
++ * When running on older Xen we must launder *pmd_k through
++ * pmd_val() to ensure that _PAGE_PRESENT is correctly set.
++ */
++ set_pmd(pmd, __pmd(pmd_val(*pmd_k)));
++#endif
++ else
++ BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
++ return pmd_k;
++}
++
++/*
++ * Handle a fault on the vmalloc or module mapping area
++ *
++ * This assumes no large pages in there.
++ */
++static inline int vmalloc_fault(unsigned long address)
++{
++ unsigned long pgd_paddr;
++ pmd_t *pmd_k;
++ pte_t *pte_k;
++ /*
++ * Synchronize this task's top level page-table
++ * with the 'reference' page table.
++ *
++ * Do _not_ use "current" here. We might be inside
++ * an interrupt in the middle of a task switch..
++ */
++ pgd_paddr = read_cr3();
++ pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
++ if (!pmd_k)
++ return -1;
++ pte_k = pte_offset_kernel(pmd_k, address);
++ if (!pte_present(*pte_k))
++ return -1;
++ return 0;
++}
++
++/*
++ * This routine handles page faults. It determines the address,
++ * and the problem, and then passes it off to one of the appropriate
++ * routines.
++ *
++ * error_code:
++ * bit 0 == 0 means no page found, 1 means protection fault
++ * bit 1 == 0 means read, 1 means write
++ * bit 2 == 0 means kernel, 1 means user-mode
++ * bit 3 == 1 means use of reserved bit detected
++ * bit 4 == 1 means fault was an instruction fetch
++ */
++fastcall void __kprobes do_page_fault(struct pt_regs *regs,
++ unsigned long error_code)
++{
++ struct task_struct *tsk;
++ struct mm_struct *mm;
++ struct vm_area_struct * vma;
++ unsigned long address;
++ int write, si_code;
++
++ /* get the address */
++ address = read_cr2();
++
++ /* Set the "privileged fault" bit to something sane. */
++ error_code &= ~4;
++ error_code |= (regs->xcs & 2) << 1;
++ if (regs->eflags & X86_EFLAGS_VM)
++ error_code |= 4;
++
++ tsk = current;
++
++ si_code = SEGV_MAPERR;
++
++ /*
++ * We fault-in kernel-space virtual memory on-demand. The
++ * 'reference' page table is init_mm.pgd.
++ *
++ * NOTE! We MUST NOT take any locks for this case. We may
++ * be in an interrupt or a critical region, and should
++ * only copy the information from the master page table,
++ * nothing more.
++ *
++ * This verifies that the fault happens in kernel space
++ * (error_code & 4) == 0, and that the fault was not a
++ * protection error (error_code & 9) == 0.
++ */
++ if (unlikely(address >= TASK_SIZE)) {
++#ifdef CONFIG_XEN
++ /* Faults in hypervisor area can never be patched up. */
++ if (address >= hypervisor_virt_start)
++ goto bad_area_nosemaphore;
++#endif
++ if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
++ return;
++ /* Can take a spurious fault if mapping changes R/O -> R/W. */
++ if (spurious_fault(regs, address, error_code))
++ return;
++ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
++ SIGSEGV) == NOTIFY_STOP)
++ return;
++ /*
++ * Don't take the mm semaphore here. If we fixup a prefetch
++ * fault we could otherwise deadlock.
++ */
++ goto bad_area_nosemaphore;
++ }
++
++ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
++ SIGSEGV) == NOTIFY_STOP)
++ return;
++
++ /* It's safe to allow irq's after cr2 has been saved and the vmalloc
++ fault has been handled. */
++ if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
++ local_irq_enable();
++
++ mm = tsk->mm;
++
++ /*
++ * If we're in an interrupt, have no user context or are running in an
++ * atomic region then we must not take the fault..
++ */
++ if (in_atomic() || !mm)
++ goto bad_area_nosemaphore;
++
++ /* When running in the kernel we expect faults to occur only to
++ * addresses in user space. All other faults represent errors in the
++ * kernel and should generate an OOPS. Unfortunately, in the case of an
++ * erroneous fault occurring in a code path which already holds mmap_sem
++ * we will deadlock attempting to validate the fault against the
++ * address space. Luckily the kernel only validly references user
++ * space from well defined areas of code, which are listed in the
++ * exceptions table.
++ *
++ * As the vast majority of faults will be valid we will only perform
++ * the source reference check when there is a possibility of a deadlock.
++ * Attempt to lock the address space, if we cannot we then validate the
++ * source. If this is invalid we can skip the address space check,
++ * thus avoiding the deadlock.
++ */
++ if (!down_read_trylock(&mm->mmap_sem)) {
++ if ((error_code & 4) == 0 &&
++ !search_exception_tables(regs->eip))
++ goto bad_area_nosemaphore;
++ down_read(&mm->mmap_sem);
++ }
++
++ vma = find_vma(mm, address);
++ if (!vma)
++ goto bad_area;
++ if (vma->vm_start <= address)
++ goto good_area;
++ if (!(vma->vm_flags & VM_GROWSDOWN))
++ goto bad_area;
++ if (error_code & 4) {
++ /*
++ * Accessing the stack below %esp is always a bug.
++ * The large cushion allows instructions like enter
++ * and pusha to work. ("enter $65535,$31" pushes
++ * 32 pointers and then decrements %esp by 65535.)
++ */
++ if (address + 65536 + 32 * sizeof(unsigned long) < regs->esp)
++ goto bad_area;
++ }
++ if (expand_stack(vma, address))
++ goto bad_area;
++/*
++ * Ok, we have a good vm_area for this memory access, so
++ * we can handle it..
++ */
++good_area:
++ si_code = SEGV_ACCERR;
++ write = 0;
++ switch (error_code & 3) {
++ default: /* 3: write, present */
++#ifdef TEST_VERIFY_AREA
++ if (regs->cs == GET_KERNEL_CS())
++ printk("WP fault at %08lx\n", regs->eip);
++#endif
++ /* fall through */
++ case 2: /* write, not present */
++ if (!(vma->vm_flags & VM_WRITE))
++ goto bad_area;
++ write++;
++ break;
++ case 1: /* read, present */
++ goto bad_area;
++ case 0: /* read, not present */
++ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
++ goto bad_area;
++ }
++
++ survive:
++ /*
++ * If for any reason at all we couldn't handle the fault,
++ * make sure we exit gracefully rather than endlessly redo
++ * the fault.
++ */
++ switch (handle_mm_fault(mm, vma, address, write)) {
++ case VM_FAULT_MINOR:
++ tsk->min_flt++;
++ break;
++ case VM_FAULT_MAJOR:
++ tsk->maj_flt++;
++ break;
++ case VM_FAULT_SIGBUS:
++ goto do_sigbus;
++ case VM_FAULT_OOM:
++ goto out_of_memory;
++ default:
++ BUG();
++ }
++
++ /*
++ * Did it hit the DOS screen memory VA from vm86 mode?
++ */
++ if (regs->eflags & VM_MASK) {
++ unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
++ if (bit < 32)
++ tsk->thread.screen_bitmap |= 1 << bit;
++ }
++ up_read(&mm->mmap_sem);
++ return;
++
++/*
++ * Something tried to access memory that isn't in our memory map..
++ * Fix it, but check if it's kernel or user first..
++ */
++bad_area:
++ up_read(&mm->mmap_sem);
++
++bad_area_nosemaphore:
++ /* User mode accesses just cause a SIGSEGV */
++ if (error_code & 4) {
++ /*
++ * Valid to do another page fault here because this one came
++ * from user space.
++ */
++ if (is_prefetch(regs, address, error_code))
++ return;
++
++ tsk->thread.cr2 = address;
++ /* Kernel addresses are always protection faults */
++ tsk->thread.error_code = error_code | (address >= TASK_SIZE);
++ tsk->thread.trap_no = 14;
++ force_sig_info_fault(SIGSEGV, si_code, address, tsk);
++ return;
++ }
++
++#ifdef CONFIG_X86_F00F_BUG
++ /*
++ * Pentium F0 0F C7 C8 bug workaround.
++ */
++ if (boot_cpu_data.f00f_bug) {
++ unsigned long nr;
++
++ nr = (address - idt_descr.address) >> 3;
++
++ if (nr == 6) {
++ do_invalid_op(regs, 0);
++ return;
++ }
++ }
++#endif
++
++no_context:
++ /* Are we prepared to handle this kernel fault? */
++ if (fixup_exception(regs))
++ return;
++
++ /*
++ * Valid to do another page fault here, because if this fault
++ * had been triggered by is_prefetch fixup_exception would have
++ * handled it.
++ */
++ if (is_prefetch(regs, address, error_code))
++ return;
++
++/*
++ * Oops. The kernel tried to access some bad page. We'll have to
++ * terminate things with extreme prejudice.
++ */
++
++ bust_spinlocks(1);
++
++ if (oops_may_print()) {
++ #ifdef CONFIG_X86_PAE
++ if (error_code & 16) {
++ pte_t *pte = lookup_address(address);
++
++ if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
++ printk(KERN_CRIT "kernel tried to execute "
++ "NX-protected page - exploit attempt? "
++ "(uid: %d)\n", current->uid);
++ }
++ #endif
++ if (address < PAGE_SIZE)
++ printk(KERN_ALERT "BUG: unable to handle kernel NULL "
++ "pointer dereference");
++ else
++ printk(KERN_ALERT "BUG: unable to handle kernel paging"
++ " request");
++ printk(" at virtual address %08lx\n",address);
++ printk(KERN_ALERT " printing eip:\n");
++ printk("%08lx\n", regs->eip);
++ }
++ dump_fault_path(address);
++ tsk->thread.cr2 = address;
++ tsk->thread.trap_no = 14;
++ tsk->thread.error_code = error_code;
++ die("Oops", regs, error_code);
++ bust_spinlocks(0);
++ do_exit(SIGKILL);
++
++/*
++ * We ran out of memory, or some other thing happened to us that made
++ * us unable to handle the page fault gracefully.
++ */
++out_of_memory:
++ up_read(&mm->mmap_sem);
++ if (tsk->pid == 1) {
++ yield();
++ down_read(&mm->mmap_sem);
++ goto survive;
++ }
++ printk("VM: killing process %s\n", tsk->comm);
++ if (error_code & 4)
++ do_exit(SIGKILL);
++ goto no_context;
++
++do_sigbus:
++ up_read(&mm->mmap_sem);
++
++ /* Kernel mode? Handle exceptions or die */
++ if (!(error_code & 4))
++ goto no_context;
++
++ /* User space => ok to do another page fault */
++ if (is_prefetch(regs, address, error_code))
++ return;
++
++ tsk->thread.cr2 = address;
++ tsk->thread.error_code = error_code;
++ tsk->thread.trap_no = 14;
++ force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
++}
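Editor's note: the error_code bit layout documented above do_page_fault() (and massaged early in the handler to suit Xen) is easy to decode in isolation. A small helper spelling out what each bit means; the function is purely illustrative:

#include <stdio.h>

/* Pretty-print the i386 page-fault error code: bit 0 protection,
 * bit 1 write, bit 2 user, bit 3 reserved-bit, bit 4 instruction fetch. */
static void print_fault_code(unsigned long ec)
{
	printf("%#lx: %s, %s, %s%s%s\n", ec,
	       ec & 1  ? "protection fault"      : "not-present page",
	       ec & 2  ? "write"                 : "read",
	       ec & 4  ? "user mode"             : "kernel mode",
	       ec & 8  ? ", reserved bit set"    : "",
	       ec & 16 ? ", instruction fetch"   : "");
}

int main(void)
{
	print_fault_code(0x6);	/* user write to a not-present page */
	print_fault_code(0x11);	/* kernel instruction fetch, protection fault */
	return 0;
}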
++
++#if !HAVE_SHARED_KERNEL_PMD
++void vmalloc_sync_all(void)
++{
++ /*
++ * Note that races in the updates of insync and start aren't
++ * problematic: insync can only get set bits added, and updates to
++ * start are only improving performance (without affecting correctness
++ * if undone).
++ * XEN: To work on PAE, we need to iterate over PMDs rather than PGDs.
++ * This change works just fine with 2-level paging too.
++ */
++#define sync_index(a) ((a) >> PMD_SHIFT)
++ static DECLARE_BITMAP(insync, PTRS_PER_PGD*PTRS_PER_PMD);
++ static unsigned long start = TASK_SIZE;
++ unsigned long address;
++
++ BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
++ for (address = start;
++ address >= TASK_SIZE && address < hypervisor_virt_start;
++ address += 1UL << PMD_SHIFT) {
++ if (!test_bit(sync_index(address), insync)) {
++ unsigned long flags;
++ struct page *page;
++
++ spin_lock_irqsave(&pgd_lock, flags);
++ /* XEN: failure path assumes non-empty pgd_list. */
++ if (unlikely(!pgd_list)) {
++ spin_unlock_irqrestore(&pgd_lock, flags);
++ return;
++ }
++ for (page = pgd_list; page; page =
++ (struct page *)page->index)
++ if (!vmalloc_sync_one(page_address(page),
++ address)) {
++ BUG_ON(page != pgd_list);
++ break;
++ }
++ spin_unlock_irqrestore(&pgd_lock, flags);
++ if (!page)
++ set_bit(sync_index(address), insync);
++ }
++ if (address == start && test_bit(sync_index(address), insync))
++ start = address + (1UL << PMD_SHIFT);
++ }
++}
++#endif
+diff -rpuN linux-2.6.18.8/arch/i386/mm/highmem-xen.c linux-2.6.18-xen-3.2.0/arch/i386/mm/highmem-xen.c
+--- linux-2.6.18.8/arch/i386/mm/highmem-xen.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/mm/highmem-xen.c 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,136 @@
++#include <linux/highmem.h>
++#include <linux/module.h>
++
++void *kmap(struct page *page)
++{
++ might_sleep();
++ if (!PageHighMem(page))
++ return page_address(page);
++ return kmap_high(page);
++}
++
++void kunmap(struct page *page)
++{
++ if (in_interrupt())
++ BUG();
++ if (!PageHighMem(page))
++ return;
++ kunmap_high(page);
++}
++
++/*
++ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
++ * no global lock is needed and because the kmap code must perform a global TLB
++ * invalidation when the kmap pool wraps.
++ *
++ * However when holding an atomic kmap it is not legal to sleep, so atomic
++ * kmaps are appropriate for short, tight code paths only.
++ */
++static void *__kmap_atomic(struct page *page, enum km_type type, pgprot_t prot)
++{
++ enum fixed_addresses idx;
++ unsigned long vaddr;
++
++ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
++ inc_preempt_count();
++ if (!PageHighMem(page))
++ return page_address(page);
++
++ idx = type + KM_TYPE_NR*smp_processor_id();
++ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++#ifdef CONFIG_DEBUG_HIGHMEM
++ if (!pte_none(*(kmap_pte-idx)))
++ BUG();
++#endif
++ set_pte_at_sync(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
++
++ return (void*) vaddr;
++}
++
++void *kmap_atomic(struct page *page, enum km_type type)
++{
++ return __kmap_atomic(page, type, kmap_prot);
++}
++
++/* Same as kmap_atomic but with PAGE_KERNEL_RO page protection. */
++void *kmap_atomic_pte(struct page *page, enum km_type type)
++{
++ return __kmap_atomic(page, type,
++ test_bit(PG_pinned, &page->flags)
++ ? PAGE_KERNEL_RO : kmap_prot);
++}
++
++void kunmap_atomic(void *kvaddr, enum km_type type)
++{
++#if defined(CONFIG_DEBUG_HIGHMEM) || defined(CONFIG_XEN)
++ unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
++ enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
++
++ if (vaddr < FIXADDR_START) { // FIXME
++ dec_preempt_count();
++ preempt_check_resched();
++ return;
++ }
++#endif
++
++#if defined(CONFIG_DEBUG_HIGHMEM)
++ if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
++ BUG();
++
++ /*
++ * Force other mappings to Oops if they try to access
++ * this pte without first remapping it.
++ */
++ pte_clear(&init_mm, vaddr, kmap_pte-idx);
++ __flush_tlb_one(vaddr);
++#elif defined(CONFIG_XEN)
++ /*
++ * We must ensure there are no dangling pagetable references when
++ * returning memory to Xen (decrease_reservation).
++ * XXX TODO: We could make this faster by only zapping when
++ * kmap_flush_unused is called but that is trickier and more invasive.
++ */
++ pte_clear(&init_mm, vaddr, kmap_pte-idx);
++#endif
++
++ dec_preempt_count();
++ preempt_check_resched();
++}
++
++/* This is the same as kmap_atomic() but can map memory that doesn't
++ * have a struct page associated with it.
++ */
++void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
++{
++ enum fixed_addresses idx;
++ unsigned long vaddr;
++
++ inc_preempt_count();
++
++ idx = type + KM_TYPE_NR*smp_processor_id();
++ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++ set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
++ __flush_tlb_one(vaddr);
++
++ return (void*) vaddr;
++}
++
++struct page *kmap_atomic_to_page(void *ptr)
++{
++ unsigned long idx, vaddr = (unsigned long)ptr;
++ pte_t *pte;
++
++ if (vaddr < FIXADDR_START)
++ return virt_to_page(ptr);
++
++ idx = virt_to_fix(vaddr);
++ pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
++ return pte_page(*pte);
++}
++
++EXPORT_SYMBOL(kmap);
++EXPORT_SYMBOL(kunmap);
++EXPORT_SYMBOL(kmap_atomic);
++EXPORT_SYMBOL(kmap_atomic_pte);
++EXPORT_SYMBOL(kunmap_atomic);
++EXPORT_SYMBOL(kmap_atomic_to_page);
+diff -rpuN linux-2.6.18.8/arch/i386/mm/hypervisor.c linux-2.6.18-xen-3.2.0/arch/i386/mm/hypervisor.c
+--- linux-2.6.18.8/arch/i386/mm/hypervisor.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/mm/hypervisor.c 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,602 @@
++/******************************************************************************
++ * mm/hypervisor.c
++ *
++ * Update page tables via the hypervisor.
++ *
++ * Copyright (c) 2002-2004, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/vmalloc.h>
++#include <asm/page.h>
++#include <asm/pgtable.h>
++#include <asm/hypervisor.h>
++#include <xen/balloon.h>
++#include <xen/features.h>
++#include <xen/interface/memory.h>
++#include <linux/module.h>
++#include <linux/percpu.h>
++#include <asm/tlbflush.h>
++#include <linux/highmem.h>
++
++void xen_l1_entry_update(pte_t *ptr, pte_t val)
++{
++ mmu_update_t u;
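++ /*
++ * With CONFIG_HIGHPTE the pte page may live above high_memory, so
++ * resolve its machine address through the current mapping rather
++ * than assuming a directly-mapped lowmem virtual address.
++ */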
++#ifdef CONFIG_HIGHPTE
++ u.ptr = ((unsigned long)ptr >= (unsigned long)high_memory) ?
++ arbitrary_virt_to_machine(ptr) : virt_to_machine(ptr);
++#else
++ u.ptr = virt_to_machine(ptr);
++#endif
++ u.val = __pte_val(val);
++ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
++EXPORT_SYMBOL_GPL(xen_l1_entry_update);
++
++void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
++{
++ mmu_update_t u;
++ u.ptr = virt_to_machine(ptr);
++ u.val = __pmd_val(val);
++ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
++
++#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
++void xen_l3_entry_update(pud_t *ptr, pud_t val)
++{
++ mmu_update_t u;
++ u.ptr = virt_to_machine(ptr);
++ u.val = __pud_val(val);
++ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
++#endif
++
++#ifdef CONFIG_X86_64
++void xen_l4_entry_update(pgd_t *ptr, pgd_t val)
++{
++ mmu_update_t u;
++ u.ptr = virt_to_machine(ptr);
++ u.val = __pgd_val(val);
++ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
++#endif /* CONFIG_X86_64 */
++
++void xen_pt_switch(unsigned long ptr)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_NEW_BASEPTR;
++ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_new_user_pt(unsigned long ptr)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_NEW_USER_BASEPTR;
++ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_tlb_flush(void)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++EXPORT_SYMBOL(xen_tlb_flush);
++
++void xen_invlpg(unsigned long ptr)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_INVLPG_LOCAL;
++ op.arg1.linear_addr = ptr & PAGE_MASK;
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++EXPORT_SYMBOL(xen_invlpg);
++
++#ifdef CONFIG_SMP
++
++void xen_tlb_flush_all(void)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_TLB_FLUSH_ALL;
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_tlb_flush_mask(cpumask_t *mask)
++{
++ struct mmuext_op op;
++ if ( cpus_empty(*mask) )
++ return;
++ op.cmd = MMUEXT_TLB_FLUSH_MULTI;
++ set_xen_guest_handle(op.arg2.vcpumask, mask->bits);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_invlpg_all(unsigned long ptr)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_INVLPG_ALL;
++ op.arg1.linear_addr = ptr & PAGE_MASK;
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr)
++{
++ struct mmuext_op op;
++ if ( cpus_empty(*mask) )
++ return;
++ op.cmd = MMUEXT_INVLPG_MULTI;
++ op.arg1.linear_addr = ptr & PAGE_MASK;
++ set_xen_guest_handle(op.arg2.vcpumask, mask->bits);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++#endif /* CONFIG_SMP */
++
++void xen_pgd_pin(unsigned long ptr)
++{
++ struct mmuext_op op;
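++ /*
++ * Pin the top-level table at the level matching the paging mode:
++ * L4 on x86_64, L3 with PAE, otherwise L2.
++ */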
++#ifdef CONFIG_X86_64
++ op.cmd = MMUEXT_PIN_L4_TABLE;
++#elif defined(CONFIG_X86_PAE)
++ op.cmd = MMUEXT_PIN_L3_TABLE;
++#else
++ op.cmd = MMUEXT_PIN_L2_TABLE;
++#endif
++ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_pgd_unpin(unsigned long ptr)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_UNPIN_TABLE;
++ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_set_ldt(const void *ptr, unsigned int ents)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_SET_LDT;
++ op.arg1.linear_addr = (unsigned long)ptr;
++ op.arg2.nr_ents = ents;
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++/*
++ * Bitmap is indexed by page number. If bit is set, the page is part of a
++ * xen_create_contiguous_region() area of memory.
++ */
++unsigned long *contiguous_bitmap;
++
++static void contiguous_bitmap_set(
++ unsigned long first_page, unsigned long nr_pages)
++{
++ unsigned long start_off, end_off, curr_idx, end_idx;
++
++ curr_idx = first_page / BITS_PER_LONG;
++ start_off = first_page & (BITS_PER_LONG-1);
++ end_idx = (first_page + nr_pages) / BITS_PER_LONG;
++ end_off = (first_page + nr_pages) & (BITS_PER_LONG-1);
++
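++ /*
++ * Set bits [start_off, end_off) when the range fits in one word;
++ * otherwise partially fill the first and last words and completely
++ * fill the words in between.
++ */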
++ if (curr_idx == end_idx) {
++ contiguous_bitmap[curr_idx] |=
++ ((1UL<<end_off)-1) & -(1UL<<start_off);
++ } else {
++ contiguous_bitmap[curr_idx] |= -(1UL<<start_off);
++ while ( ++curr_idx < end_idx )
++ contiguous_bitmap[curr_idx] = ~0UL;
++ contiguous_bitmap[curr_idx] |= (1UL<<end_off)-1;
++ }
++}
++
++static void contiguous_bitmap_clear(
++ unsigned long first_page, unsigned long nr_pages)
++{
++ unsigned long start_off, end_off, curr_idx, end_idx;
++
++ curr_idx = first_page / BITS_PER_LONG;
++ start_off = first_page & (BITS_PER_LONG-1);
++ end_idx = (first_page + nr_pages) / BITS_PER_LONG;
++ end_off = (first_page + nr_pages) & (BITS_PER_LONG-1);
++
++ if (curr_idx == end_idx) {
++ contiguous_bitmap[curr_idx] &=
++ -(1UL<<end_off) | ((1UL<<start_off)-1);
++ } else {
++ contiguous_bitmap[curr_idx] &= (1UL<<start_off)-1;
++ while ( ++curr_idx != end_idx )
++ contiguous_bitmap[curr_idx] = 0;
++ contiguous_bitmap[curr_idx] &= -(1UL<<end_off);
++ }
++}
++
++/* Protected by balloon_lock. */
++#define MAX_CONTIG_ORDER 9 /* 2MB */
++static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
++static unsigned long limited_frames[1<<MAX_CONTIG_ORDER];
++static multicall_entry_t cr_mcl[1<<MAX_CONTIG_ORDER];
++
++/* Ensure multi-page extents are contiguous in machine memory. */
++int xen_create_contiguous_region(
++ unsigned long vstart, unsigned int order, unsigned int address_bits)
++{
++ unsigned long *in_frames = discontig_frames, out_frame;
++ unsigned long frame, flags;
++ unsigned int i;
++ int rc, success;
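++ /*
++ * Exchange descriptor: trade 2^order order-0 frames for a single
++ * extent of the requested order, optionally constrained to lie
++ * below 2^address_bits.
++ */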
++ struct xen_memory_exchange exchange = {
++ .in = {
++ .nr_extents = 1UL << order,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ },
++ .out = {
++ .nr_extents = 1,
++ .extent_order = order,
++ .address_bits = address_bits,
++ .domid = DOMID_SELF
++ }
++ };
++
++ /*
++ * Currently an auto-translated guest will not perform I/O, nor will
++ * it require PAE page directories below 4GB. Therefore any calls to
++ * this function are redundant and can be ignored.
++ */
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return 0;
++
++ if (unlikely(order > MAX_CONTIG_ORDER))
++ return -ENOMEM;
++
++ set_xen_guest_handle(exchange.in.extent_start, in_frames);
++ set_xen_guest_handle(exchange.out.extent_start, &out_frame);
++
++ scrub_pages((void *)vstart, 1 << order);
++
++ balloon_lock(flags);
++
++ /* 1. Zap current PTEs, remembering MFNs. */
++ for (i = 0; i < (1U<<order); i++) {
++ in_frames[i] = pfn_to_mfn((__pa(vstart) >> PAGE_SHIFT) + i);
++ MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
++ __pte_ma(0), 0);
++ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
++ INVALID_P2M_ENTRY);
++ }
++ if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
++ BUG();
++
++ /* 2. Get a new contiguous memory extent. */
++ out_frame = __pa(vstart) >> PAGE_SHIFT;
++ rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
++ success = (exchange.nr_exchanged == (1UL << order));
++ BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
++ BUG_ON(success && (rc != 0));
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (unlikely(rc == -ENOSYS)) {
++ /* Compatibility when XENMEM_exchange is unsupported. */
++ if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &exchange.in) != (1UL << order))
++ BUG();
++ success = (HYPERVISOR_memory_op(XENMEM_populate_physmap,
++ &exchange.out) == 1);
++ if (!success) {
++ /* Couldn't get special memory: fall back to normal. */
++ for (i = 0; i < (1U<<order); i++)
++ in_frames[i] = (__pa(vstart)>>PAGE_SHIFT) + i;
++ if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
++ &exchange.in) != (1UL<<order))
++ BUG();
++ }
++ }
++#endif
++
++ /* 3. Map the new extent in place of old pages. */
++ for (i = 0; i < (1U<<order); i++) {
++ frame = success ? (out_frame + i) : in_frames[i];
++ MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
++ pfn_pte_ma(frame, PAGE_KERNEL), 0);
++ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
++ }
++
++ cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = order
++ ? UVMF_TLB_FLUSH|UVMF_ALL
++ : UVMF_INVLPG|UVMF_ALL;
++ if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
++ BUG();
++
++ if (success)
++ contiguous_bitmap_set(__pa(vstart) >> PAGE_SHIFT,
++ 1UL << order);
++
++ balloon_unlock(flags);
++
++ return success ? 0 : -ENOMEM;
++}
++EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
++
++void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
++{
++ unsigned long *out_frames = discontig_frames, in_frame;
++ unsigned long frame, flags;
++ unsigned int i;
++ int rc, success;
++ struct xen_memory_exchange exchange = {
++ .in = {
++ .nr_extents = 1,
++ .extent_order = order,
++ .domid = DOMID_SELF
++ },
++ .out = {
++ .nr_extents = 1UL << order,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ }
++ };
++
++ if (xen_feature(XENFEAT_auto_translated_physmap) ||
++ !test_bit(__pa(vstart) >> PAGE_SHIFT, contiguous_bitmap))
++ return;
++
++ if (unlikely(order > MAX_CONTIG_ORDER))
++ return;
++
++ set_xen_guest_handle(exchange.in.extent_start, &in_frame);
++ set_xen_guest_handle(exchange.out.extent_start, out_frames);
++
++ scrub_pages((void *)vstart, 1 << order);
++
++ balloon_lock(flags);
++
++ contiguous_bitmap_clear(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
++
++ /* 1. Find start MFN of contiguous extent. */
++ in_frame = pfn_to_mfn(__pa(vstart) >> PAGE_SHIFT);
++
++ /* 2. Zap current PTEs. */
++ for (i = 0; i < (1U<<order); i++) {
++ MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
++ __pte_ma(0), 0);
++ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
++ INVALID_P2M_ENTRY);
++ out_frames[i] = (__pa(vstart) >> PAGE_SHIFT) + i;
++ }
++ if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
++ BUG();
++
++ /* 3. Do the exchange for non-contiguous MFNs. */
++ rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
++ success = (exchange.nr_exchanged == 1);
++ BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
++ BUG_ON(success && (rc != 0));
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (unlikely(rc == -ENOSYS)) {
++ /* Compatibility when XENMEM_exchange is unsupported. */
++ if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &exchange.in) != 1)
++ BUG();
++ if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
++ &exchange.out) != (1UL << order))
++ BUG();
++ success = 1;
++ }
++#endif
++
++ /* 4. Map new pages in place of old pages. */
++ for (i = 0; i < (1U<<order); i++) {
++ frame = success ? out_frames[i] : (in_frame + i);
++ MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
++ pfn_pte_ma(frame, PAGE_KERNEL), 0);
++ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
++ }
++
++ cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = order
++ ? UVMF_TLB_FLUSH|UVMF_ALL
++ : UVMF_INVLPG|UVMF_ALL;
++ if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
++ BUG();
++
++ balloon_unlock(flags);
++}
++EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
++
++int xen_limit_pages_to_max_mfn(
++ struct page *pages, unsigned int order, unsigned int address_bits)
++{
++ unsigned long flags, frame;
++ unsigned long *in_frames = discontig_frames, *out_frames = limited_frames;
++ struct page *page;
++ unsigned int i, n, nr_mcl;
++ int rc, success;
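++ /*
++ * limit_map marks pages whose current machine frame lies at or
++ * above the 2^address_bits boundary and therefore must be exchanged.
++ */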
++ DECLARE_BITMAP(limit_map, 1 << MAX_CONTIG_ORDER);
++
++ struct xen_memory_exchange exchange = {
++ .in = {
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ },
++ .out = {
++ .extent_order = 0,
++ .address_bits = address_bits,
++ .domid = DOMID_SELF
++ }
++ };
++
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return 0;
++
++ if (unlikely(order > MAX_CONTIG_ORDER))
++ return -ENOMEM;
++
++ bitmap_zero(limit_map, 1U << order);
++ set_xen_guest_handle(exchange.in.extent_start, in_frames);
++ set_xen_guest_handle(exchange.out.extent_start, out_frames);
++
++ /* 0. Scrub the pages. */
++ for (i = 0, n = 0; i < 1U<<order ; i++) {
++ page = &pages[i];
++ if (!(pfn_to_mfn(page_to_pfn(page)) >> (address_bits - PAGE_SHIFT)))
++ continue;
++ __set_bit(i, limit_map);
++
++ if (!PageHighMem(page))
++ scrub_pages(page_address(page), 1);
++#ifdef CONFIG_XEN_SCRUB_PAGES
++ else {
++ scrub_pages(kmap(page), 1);
++ kunmap(page);
++ ++n;
++ }
++#endif
++ }
++ if (bitmap_empty(limit_map, 1U << order))
++ return 0;
++
++ if (n)
++ kmap_flush_unused();
++
++ balloon_lock(flags);
++
++ /* 1. Zap current PTEs (if any), remembering MFNs. */
++ for (i = 0, n = 0, nr_mcl = 0; i < (1U<<order); i++) {
++ if (!test_bit(i, limit_map))
++ continue;
++ page = &pages[i];
++
++ out_frames[n] = page_to_pfn(page);
++ in_frames[n] = pfn_to_mfn(out_frames[n]);
++
++ if (!PageHighMem(page))
++ MULTI_update_va_mapping(cr_mcl + nr_mcl++,
++ (unsigned long)page_address(page),
++ __pte_ma(0), 0);
++
++ set_phys_to_machine(out_frames[n], INVALID_P2M_ENTRY);
++ ++n;
++ }
++ if (nr_mcl && HYPERVISOR_multicall_check(cr_mcl, nr_mcl, NULL))
++ BUG();
++
++ /* 2. Get new memory below the required limit. */
++ exchange.in.nr_extents = n;
++ exchange.out.nr_extents = n;
++ rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
++ success = (exchange.nr_exchanged == n);
++ BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
++ BUG_ON(success && (rc != 0));
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (unlikely(rc == -ENOSYS)) {
++ /* Compatibility when XENMEM_exchange is unsupported. */
++ if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &exchange.in) != n)
++ BUG();
++ if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
++ &exchange.out) != n)
++ BUG();
++ success = 1;
++ }
++#endif
++
++ /* 3. Map the new pages in place of old pages. */
++ for (i = 0, n = 0, nr_mcl = 0; i < (1U<<order); i++) {
++ if (!test_bit(i, limit_map))
++ continue;
++ page = &pages[i];
++
++ frame = success ? out_frames[n] : in_frames[n];
++
++ if (!PageHighMem(page))
++ MULTI_update_va_mapping(cr_mcl + nr_mcl++,
++ (unsigned long)page_address(page),
++ pfn_pte_ma(frame, PAGE_KERNEL), 0);
++
++ set_phys_to_machine(page_to_pfn(page), frame);
++ ++n;
++ }
++ if (nr_mcl) {
++ cr_mcl[nr_mcl - 1].args[MULTI_UVMFLAGS_INDEX] = order
++ ? UVMF_TLB_FLUSH|UVMF_ALL
++ : UVMF_INVLPG|UVMF_ALL;
++ if (HYPERVISOR_multicall_check(cr_mcl, nr_mcl, NULL))
++ BUG();
++ }
++
++ balloon_unlock(flags);
++
++ return success ? 0 : -ENOMEM;
++}
++EXPORT_SYMBOL_GPL(xen_limit_pages_to_max_mfn);
++
++#ifdef __i386__
++int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
++{
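++ /*
++ * Each descriptor is 8 bytes; translate its virtual address to a
++ * machine address and let Xen perform the privileged update.
++ */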
++ __u32 *lp = (__u32 *)((char *)ldt + entry * 8);
++ maddr_t mach_lp = arbitrary_virt_to_machine(lp);
++ return HYPERVISOR_update_descriptor(
++ mach_lp, (u64)entry_a | ((u64)entry_b<<32));
++}
++#endif
++
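++/*
++ * xen_change_pte_range() batches up to this many
++ * MMU_PT_UPDATE_PRESERVE_AD requests per mmu_update hypercall.
++ */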
++#define MAX_BATCHED_FULL_PTES 32
++
++int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd,
++ unsigned long addr, unsigned long end, pgprot_t newprot)
++{
++ int rc = 0, i = 0;
++ mmu_update_t u[MAX_BATCHED_FULL_PTES];
++ pte_t *pte;
++ spinlock_t *ptl;
++
++ if (!xen_feature(XENFEAT_mmu_pt_update_preserve_ad))
++ return 0;
++
++ pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
++ do {
++ if (pte_present(*pte)) {
++ u[i].ptr = (__pmd_val(*pmd) & PHYSICAL_PAGE_MASK)
++ | ((unsigned long)pte & ~PAGE_MASK)
++ | MMU_PT_UPDATE_PRESERVE_AD;
++ u[i].val = __pte_val(pte_modify(*pte, newprot));
++ if (++i == MAX_BATCHED_FULL_PTES) {
++ if ((rc = HYPERVISOR_mmu_update(
++ &u[0], i, NULL, DOMID_SELF)) != 0)
++ break;
++ i = 0;
++ }
++ }
++ } while (pte++, addr += PAGE_SIZE, addr != end);
++ if (i)
++ rc = HYPERVISOR_mmu_update(&u[0], i, NULL, DOMID_SELF);
++ pte_unmap_unlock(pte - 1, ptl);
++ BUG_ON(rc && rc != -ENOSYS);
++ return !rc;
++}
+diff -rpuN linux-2.6.18.8/arch/i386/mm/init-xen.c linux-2.6.18-xen-3.2.0/arch/i386/mm/init-xen.c
+--- linux-2.6.18.8/arch/i386/mm/init-xen.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.18-xen-3.2.0/arch/i386/mm/init-xen.c 2008-02-15 16:21:49.000000000 -0800
+@@ -0,0 +1,850 @@
++/*
++ * linux/arch/i386/mm/init.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ *
++ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
++ */
++
++#include <linux/module.h>
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ptrace.h>
++#include <linux/mman.h>
++#include <linux/mm.h>
++#include <linux/hugetlb.h>
++#include <linux/swap.h>
++#include <linux/smp.h>
++#include <linux/init.h>
++#include <linux/highmem.h>
++#include <linux/pagemap.h>
++#include <linux/poison.h>
++#include <linux/bootmem.h>
++#include <linux/slab.h>
++#include <linux/proc_fs.h>
++#include <linux/efi.h>
++#include <linux/memory_hotplug.h>
++#include <linux/initrd.h>
++#include <linux/cpumask.h>
++#include <linux/dma-mapping.h>
++#include <linux/scatterlist.h>
++
++#include <asm/processor.h>
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/dma.h>
++#include <asm/fixmap.h>
++#include <asm/e820.h>
++#include <asm/apic.h>
++#include <asm/tlb.h>
++#include <asm/tlbflush.h>
++#include <asm/sections.h>
++#include <asm/hypervisor.h>
++#include <asm/swiotlb.h>
++
++extern unsigned long *contiguous_bitmap;
++
++unsigned int __VMALLOC_RESERVE = 128 << 20;
++
++DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
++unsigned long highstart_pfn, highend_pfn;
++
++static int noinline do_test_wp_bit(void);
++
++/*
++ * Creates a middle page table and puts a pointer to it in the
++ * given global directory entry. This only returns the gd entry
++ * in non-PAE compilation mode, since the middle layer is folded.
++ */
++static pmd_t * __init one_md_table_init(pgd_t *pgd)
++{
++ pud_t *pud;
++ pmd_t *pmd_table;
++
++#ifdef CONFIG_X86_PAE
++ pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
++ make_lowmem_page_readonly(pmd_table, XENFEAT_writable_page_tables);
++ set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
++ pud = pud_offset(pgd, 0);
++ if (pmd_table != pmd_offset(pud, 0))
++ BUG();
++#else
++ pud = pud_offset(pgd, 0);
++ pmd_table = pmd_offset(pud, 0);
++#endif
++
++ return pmd_table;
++}
++
++/*
++ * Create a page table and place a pointer to it in a middle page
++ * directory entry.
++ */
++static pte_t * __init one_page_table_init(pmd_t *pmd)
++{
++ if (pmd_none(*pmd)) {
++ pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
++ make_lowmem_page_readonly(page_table,
++ XENFEAT_writable_page_tables);
++ set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
++ if (page_table != pte_offset_kernel(pmd, 0))
++ BUG();
++
++ return page_table;
++ }
++
++ return pte_offset_kernel(pmd, 0);
++}
++
++/*
++ * This function initializes a certain range of kernel virtual memory
++ * with new bootmem page tables, everywhere page tables are missing in
++ * the given range.
++ */
++
++/*
++ * NOTE: The pagetables are allocated contiguously in physical space,
++ * so we can cache the place of the first one and move around without
++ * checking the pgd every time.
++ */
++static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ int pgd_idx, pmd_idx;
++ unsigned long vaddr;
++
++ vaddr = start;
++ pgd_idx = pgd_index(vaddr);
++ pmd_idx = pmd_index(vaddr);
++ pgd = pgd_base + pgd_idx;
++
++ for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
++ if (pgd_none(*pgd))
++ one_md_table_init(pgd);
++ pud = pud_offset(pgd, vaddr);
++ pmd = pmd_offset(pud, vaddr);
++ for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
++ if (vaddr < hypervisor_virt_start && pmd_none(*pmd))
++ one_page_table_init(pmd);
++
++ vaddr += PMD_SIZE;
++ }
++ pmd_idx = 0;
++ }
++}
++
++static inline int is_kernel_text(unsigned long addr)
++{
++ if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
++ return 1;
++ return 0;
++}
++
++/*
++ * This maps the physical memory to kernel virtual address space, a total
++ * of max_low_pfn pages, by creating page tables starting from address
++ * PAGE_OFFSET.
++ */
++static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
++{
++ unsigned long pfn;
++ pgd_t *pgd;
++ pmd_t *pmd;
++ pte_t *pte;
++ int pgd_idx, pmd_idx, pte_ofs;
++
++ unsigned long max_ram_pfn = xen_start_info->nr_pages;
++ if (max_ram_pfn > max_low_pfn)
++ max_ram_pfn = max_low_pfn;
++
++ pgd_idx = pgd_index(PAGE_OFFSET);
++ pgd = pgd_base + pgd_idx;
++ pfn = 0;
++ pmd_idx = pmd_index(PAGE_OFFSET);
++ pte_ofs = pte_index(PAGE_OFFSET);
++
++ for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
++#ifdef CONFIG_XEN
++ /*
++ * Native Linux does not have PAE paging enabled yet at this
++ * point. When running as a Xen domain we are already in PAE
++ * mode, so we cannot simply hook in an empty pmd; that would
++ * kill the mappings we are currently using ...
++ */
++ pmd = pmd_offset(pud_offset(pgd, PAGE_OFFSET), PAGE_OFFSET);
++#else
++ pmd = one_md_table_init(pgd);
++#endif
++ if (pfn >= max_low_pfn)
++ continue;
++ pmd += pmd_idx;
++ for (; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
++ unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
++ if (address >= hypervisor_virt_start)
++ continue;
++
++ /* Map with big pages if possible, otherwise create normal page tables. */
++ if (cpu_has_pse) {
++ unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;
++
++ if (is_kernel_text(address) || is_kernel_text(address2))
++ set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
++ else
++ set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
++ pfn += PTRS_PER_PTE;
++ } else {
++ pte = one_page_table_init(pmd);
++
++ pte += pte_ofs;
++ for (; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++) {
++ /* XEN: Only map initial RAM allocation. */
++ if ((pfn >= max_ram_pfn) || pte_present(*pte))
++ continue;
++ if (is_kernel_text(address))
++ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
++ else
++ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
++ }
++ pte_ofs = 0;
++ }
++ }
++ pmd_idx = 0;
++ }
++}
++
++#ifndef CONFIG_XEN
++
++static inline int page_kills_ppro(unsigned long pagenr)
++{
++ if (pagenr >= 0x70000 && pagenr <= 0x7003F)
++ return 1;
++ return 0;
++}
++
++#else
++
++#define page_kills_ppro(p) 0
++
++#endif
++
++extern int is_available_memory(efi_memory_desc_t *);
++
++int page_is_ram(unsigned long pagenr)
++{
++ int i;
++ unsigned long addr, end;
++
++ if (efi_enabled) {
++ efi_memory_desc_t *md;
++ void *p;
++
++ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
++ md = p;
++ if (!is_available_memory(md))
++ continue;
++ addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
++ end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;
++
++ if ((pagenr >= addr) && (pagenr < end))
++ return 1;
++ }
++ return 0;
++ }
++
++ for (i = 0; i < e820.nr_map; i++) {
++
++ if (e820.map[i].type != E820_RAM) /* not usable memory */
++ continue;
++ /*
++ * !!!FIXME!!! Some BIOSen report areas as RAM that
++ * are not. Notably the 640->1Mb area. We need a sanity
++ * check here.
++ */
++ addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
++ end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
++ if ((pagenr >= addr) && (pagenr < end))
++ return 1;
++ }
++ return 0;
++}
++
++#ifdef CONFIG_HIGHMEM
++pte_t *kmap_pte;
++pgprot_t kmap_prot;
++
++#define kmap_get_fixmap_pte(vaddr) \
++ pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))
++
++static void __init kmap_init(void)
++{
++ unsigned long kmap_vstart;
++
++ /* cache the first kmap pte */
++ kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
++ kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
++
++ kmap_prot = PAGE_KERNEL;
++}
++
++static void __init permanent_kmaps_init(pgd_t *pgd_base)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++ unsigned long vaddr;
++
++ vaddr = PKMAP_BASE;
++ page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
++
++ pgd = swapper_pg_dir + pgd_index(vaddr);
++ pud = pud_offset(pgd, vaddr);
++ pmd = pmd_offset(pud, vaddr);
++ pte = pte_offset_kernel(pmd, vaddr);
++ pkmap_page_table = pte;
++}
++
++static void __meminit free_new_highpage(struct page *page, int pfn)
++{
++ init_page_count(page);
++ if (pfn < xen_start_info->nr_pages)
++ __free_page(page);
++ totalhigh_pages++;
++}
++
++void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
++{
++ if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
++ ClearPageReserved(page);
++ free_new_highpage(page, pfn);
++ } else
++ SetPageReserved(page);
++}
++
++static int add_one_highpage_hotplug(struct page *page, unsigned long pfn)
++{
++ free_new_highpage(page, pfn);
++ totalram_pages++;
++#ifdef CONFIG_FLATMEM
++ max_mapnr = max(pfn, max_mapnr);
++#endif
++ num_physpages++;
++ return 0;
++}
++
++/*
++ * Not currently handling the NUMA case.
++ * We assume a single node, and that all memory added
++ * dynamically and onlined here is in HIGHMEM.
++ */
++void online_page(struct page *page)
++{
++ ClearPageReserved(page);
++ add_one_highpage_hotplug(page, page_to_pfn(page));
++}
++
++
++#ifdef CONFIG_NUMA
++extern void set_highmem_pages_init(int);
++#else
++static void __init set_highmem_pages_init(int bad_ppro)
++{
++ int pfn;
++ for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
++ add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
++ totalram_pages += totalhigh_pages;
++}
++#endif /* CONFIG_NUMA */
++
++#else
++#define kmap_init() do { } while (0)
++#define permanent_kmaps_init(pgd_base) do { } while (0)
++#define set_highmem_pages_init(bad_ppro) do { } while (0)
++#endif /* CONFIG_HIGHMEM */
++
++unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
++EXPORT_SYMBOL(__PAGE_KERNEL);
++unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
++
++#ifdef CONFIG_NUMA
++extern void __init remap_numa_kva(void);
++#else
++#define remap_numa_kva() do {} while (0)
++#endif
++
++pgd_t *swapper_pg_dir;
++
++static void __init pagetable_init (void)
++{
++ unsigned long vaddr;
++ pgd_t *pgd_base = (pgd_t *)xen_start_info->pt_base;
++
++ swapper_pg_dir = pgd_base;
++ init_mm.pgd = pgd_base;
++
++ /* Enable PSE if available */
++ if (cpu_has_pse) {
++ set_in_cr4(X86_CR4_PSE);
++ }
++
++ /* Enable PGE if available */
++ if (cpu_has_pge) {
++ set_in_cr4(X86_CR4_PGE);
++ __PAGE_KERNEL |= _PAGE_GLOBAL;
++ __PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
++ }
++
++ kernel_physical_mapping_init(pgd_base);
++ remap_numa_kva();
++
++ /*
++ * Fixed mappings, only the page table structure has to be
++ * created - mappings will be set by set_fixmap():
++ */
++ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
++ page_table_range_init(vaddr, hypervisor_virt_start, pgd_base);
++
++ permanent_kmaps_init(pgd_base);
++}
++
++#if defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_ACPI_SLEEP)
++/*
++ * Swap suspend & friends need this for resume because things like the intel-agp
++ * driver might have split up a kernel 4MB mapping.
++ */
++char __nosavedata swsusp_pg_dir[PAGE_SIZE]
++ __attribute__ ((aligned (PAGE_SIZE)));
++
++static inline void save_pg_dir(void)
++{
++ memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
++}
++#else
++static inline void save_pg_dir(void)
++{
++}
++#endif
++
++void zap_low_mappings (void)
++{
++ int i;
++
++ save_pg_dir();
++
++ /*
++ * Zap initial low-memory mappings.
++ *
++ * Note that "pgd_clear()" doesn't do it for
++ * us, because pgd_clear() is a no-op on i386.
++ */
++ for (i = 0; i < USER_PTRS_PER_PGD; i++)
++#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
++ set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
++#else
++ set_pgd(swapper_pg_dir+i, __pgd(0));
++#endif
++ flush_tlb_all();
++}
++
++static int disable_nx __initdata = 0;
++u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
++EXPORT_SYMBOL(__supported_pte_mask);
++
++/*
++ * noexec = on|off
++ *
++ * Control non executable mappings.
++ *
++ * on Enable
++ * off Disable
++ */
++void __init noexec_setup(const char *str)
++{
++ if (!strncmp(str, "on",2) && cpu_has_nx) {
++ __supported_pte_mask |= _PAGE_NX;
++ disable_nx = 0;
++ } else if (!strncmp(str,"off",3)) {
++ disable_nx = 1;
++ __supported_pte_mask &= ~_PAGE_NX;
++ }
++}
++
++int nx_enabled = 0;
++#ifdef CONFIG_X86_PAE
++
++static void __init set_nx(void)
++{
++ unsigned int v[4], l, h;
++
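++ /*
++ * CPUID leaf 0x80000001: EDX bit 20 advertises NX. If present and
++ * not disabled on the command line, enable it via EFER and allow
++ * _PAGE_NX in ptes.
++ */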
++ if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
++ cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
++ if ((v[3] & (1 << 20)) && !disable_nx) {
++ rdmsr(MSR_EFER, l, h);
++ l |= EFER_NX;
++ wrmsr(MSR_EFER, l, h);
++ nx_enabled = 1;
++ __supported_pte_mask |= _PAGE_NX;
++ }
++ }
++}
++
++/*
++ * Enables/disables executability of a given kernel page and
++ * returns the previous setting.
++ */
++int __init set_kernel_exec(unsigned long vaddr, int enable)
++{
++ pte_t *pte;
++ int ret = 1;
++
++ if (!nx_enabled)
++ goto out;
++
++ pte = lookup_address(vaddr);
++ BUG_ON(!pte);
++
++ if (!pte_exec_kernel(*pte))
++ ret = 0;
++
++ if (enable)
++ pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
++ else
++ pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
++ __flush_tlb_all();
++out:
++ return ret;
++}
++
++#endif
++
++/*
++ * paging_init() sets up the page tables - note that the first 8MB are
++ * already mapped by head.S.
++ *
++ * This routine also unmaps the page at virtual kernel address 0, so
++ * that we can trap those pesky NULL-reference errors in the kernel.
++ */
++void __init paging_init(void)
++{
++ int i;
++
++#ifdef CONFIG_X86_PAE
++ set_nx();
++ if (nx_enabled)
++ printk("NX (Execute Disable) protection: active\n");
++#endif
++
++ pagetable_init();
++
++#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
++ /*
++ * We will bail out later - printk doesn't work right now so
++ * the user would just see a hanging kernel.
++ * when running as xen domain we are already in PAE mode at
++ * this point.
++ */
++ if (cpu_has_pae)
++ set_in_cr4(X86_CR4_PAE);
++#endif
++ __flush_tlb_all();
++
++ kmap_init();
++
++ /* Switch to the real shared_info page, and clear the
++ * dummy page. */
++ set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
++ HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
++ memset(empty_zero_page, 0, sizeof(empty_zero_page));
++
++ /* Setup mapping of lower 1st MB */
++ for (i = 0; i < NR_FIX_ISAMAPS; i++)
++ if (is_initial_xendomain())
++ set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
++ else
++ __set_fixmap(FIX_ISAMAP_BEGIN - i,
++ virt_to_machine(empty_zero_page),
++ PAGE_KERNEL_RO);
++}
++
++/*
++ * Test if the WP bit works in supervisor mode. It isn't supported on 386's
++ * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
++ * used to involve black magic jumps to work around some nasty CPU bugs,
++ * but fortunately the switch to using exceptions got rid of all that.
++ */
++
++static void __init test_wp_bit(void)
++{
++ printk("Checking if this processor honours the WP bit even in supervisor mode... ");
++
++ /* Any page-aligned address will do, the test is non-destructive */
++ __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
++ boot_cpu_data.wp_works_ok = do_test_wp_bit();
++ clear_fixmap(FIX_WP_TEST);
++
++ if (!boot_cpu_data.wp_works_ok) {
++ printk("No.\n");
++#ifdef CONFIG_X86_WP_WORKS_OK
++ panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
++#endif
++ } else {
++ printk("Ok.\n");
++ }
++}
++
++static void __init set_max_mapnr_init(void)
++{
++#ifdef CONFIG_HIGHMEM
++ num_physpages = highend_pfn;
++#else
++ num_physpages = max_low_pfn;
++#endif
++#ifdef CONFIG_FLATMEM
++ max_mapnr = num_physpages;
++#endif
++}
++
++static struct kcore_list kcore_mem, kcore_vmalloc;
++
++void __init mem_init(void)
++{
++ extern int ppro_with_ram_bug(void);
++ int codesize, reservedpages, datasize, initsize;
++ int tmp;
++ int bad_ppro;
++ unsigned long pfn;
++
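++ /*
++ * One bit per low-memory page frame, with a little rounding slack,
++ * converted to a size in bytes.
++ */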
++ contiguous_bitmap = alloc_bootmem_low_pages(
++ (max_low_pfn + 2*BITS_PER_LONG) >> 3);
++ BUG_ON(!contiguous_bitmap);
++ memset(contiguous_bitmap, 0, (max_low_pfn + 2*BITS_PER_LONG) >> 3);
++
++#if defined(CONFIG_SWIOTLB)
++ swiotlb_init();
++#endif
++
++#ifdef CONFIG_FLATMEM
++ if (!mem_map)
++ BUG();
++#endif
++
++ bad_ppro = ppro_with_ram_bug();
++
++#ifdef CONFIG_HIGHMEM
++ /* check that fixmap and pkmap do not overlap */
++ if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
++ printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
++ printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
++ PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
++ BUG();
++ }
++#endif
++
++ set_max_mapnr_init();
++
++#ifdef CONFIG_HIGHMEM
++ high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
++#else
++ high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
++#endif
++ printk("vmalloc area: %lx-%lx, maxmem %lx\n",
++ VMALLOC_START,VMALLOC_END,MAXMEM);
++ BUG_ON(VMALLOC_START > VMALLOC_END);
++
++ /* this will put all low memory onto the freelists */
++ totalram_pages += free_all_bootmem();
++ /* XEN: init and count low-mem pages outside initial allocation. */
++ for (pfn = xen_start_info->nr_pages; pfn < max_low_pfn; pfn++) {
++ ClearPageReserved(pfn_to_page(pfn));
++ init_page_count(pfn_to_page(pfn));
++ totalram_pages++;
++ }
++
++ reservedpages = 0;
++ for (tmp = 0; tmp < max_low_pfn; tmp++)
++ /*
++ * Only count reserved RAM pages
++ */
++ if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
++ reservedpages++;
++
++ set_highmem_pages_init(bad_ppro);
++
++ codesize = (unsigned long) &_etext - (unsigned long) &_text;
++ datasize = (unsigned long) &_edata - (unsigned long) &_etext;
++ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
++
++ kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
++ kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
++ VMALLOC_END-VMALLOC_START);
++
++ printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
++ (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
++ num_physpages << (PAGE_SHIFT-10),
++ codesize >> 10,
++ reservedpages << (PAGE_SHIFT-10),
++ datasize >> 10,
++ initsize >> 10,
++ (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
++ );
++
++#ifdef CONFIG_X86_PAE
++ if (!cpu_has_pae)
++ panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
++#endif
++ if (boot_cpu_data.wp_works_ok < 0)
++ test_wp_bit();
++
++ /*
++ * Subtle. SMP is doing its boot stuff late (because it has to
++ * fork idle threads) - but it also needs low mappings for the
++ * protected-mode entry to work. We zap these entries only after
++ * the WP-bit has been tested.
++ */
++#ifndef CONFIG_SMP
++ zap_low_mappings();
++#endif
++
++ set_bit(PG_pinned, &virt_to_page(init_mm.pgd)->flags);
++}
++
++/*
++ * this is for the non-NUMA, s