Commit 709d9f54 authored by Linus Torvalds

Merge branch 'x86-vmware-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-vmware-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, paravirt: Remove alloc_pmd_clone hook, only used by VMI
  x86, vmware: Remove deprecated VMI kernel support

Fix up trivial #include conflict in arch/x86/kernel/smpboot.c
parents cca8209e b0f4c062
...@@ -386,34 +386,6 @@ Who: Tejun Heo <tj@kernel.org>
----------------------------
What: Support for VMware's guest paravirtualization technique [VMI] will be
dropped.
When: 2.6.37 or earlier.
Why: With the recent innovations in CPU hardware acceleration technologies
from Intel and AMD, VMware ran a few experiments comparing these
techniques to its guest paravirtualization technique. These
hardware-assisted virtualization techniques have outperformed VMI in
most workloads. VMware expects these hardware features to be
ubiquitous within a couple of years; as a result, VMware has started a
phased retirement of this feature from the hypervisor. We will be
removing this feature from the kernel too. Right now we are targeting
2.6.37, but it may be retired earlier if technical reasons (read: the
opportunity to remove a major chunk of pvops) arise.
Please note that VMI has always been an optimization and non-VMI kernels
still work fine on VMware's platform.
The latest VMware products that support VMI are Workstation 7.0 and,
on the ESX side, vSphere 4.0; future maintenance releases of these
products will continue to support VMI.
For more details about the VMI retirement, see
http://blogs.vmware.com/guestosguide/2009/09/vmi-retirement.html
Who: Alok N Kataria <akataria@vmware.com>
----------------------------
What: Support for lcd_switch and display_get in asus-laptop driver
When: March 2010
Why: These two features use non-standard interfaces. There are the
...
...@@ -455,7 +455,7 @@ and is between 256 and 4096 characters. It is defined in the file
[ARM] imx_timer1,OSTS,netx_timer,mpu_timer2,
pxa_timer,timer3,32k_counter,timer0_1
[AVR32] avr32
[X86-32] pit,hpet,tsc,vmi-timer;
[X86-32] pit,hpet,tsc;
scx200_hrt on Geode; cyclone on IBM x440
[MIPS] MIPS
[PARISC] cr16
...
...@@ -521,25 +521,6 @@ if PARAVIRT_GUEST
source "arch/x86/xen/Kconfig"
config VMI
bool "VMI Guest support (DEPRECATED)"
select PARAVIRT
depends on X86_32
---help---
VMI provides a paravirtualized interface to the VMware ESX server
(it could be used by other hypervisors in theory too, but is not
at the moment), by linking the kernel to a GPL-ed ROM module
provided by the hypervisor.
As of September 2009, VMware has started a phased retirement
of this feature from VMware's products. Please see
feature-removal-schedule.txt for details. If you are
planning to enable this option, please note that you cannot
live migrate a VMI-enabled VM to a future VMware product
that doesn't support VMI. So if you expect your kernel to
seamlessly migrate to newer VMware products, keep this
disabled.
config KVM_CLOCK
bool "KVM paravirtualized clock"
select PARAVIRT
...
...@@ -416,11 +416,6 @@ static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}
static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
unsigned long start, unsigned long count)
{
PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
}
static inline void paravirt_release_pmd(unsigned long pfn)
{
PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
...
...@@ -255,7 +255,6 @@ struct pv_mmu_ops {
*/
void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count);
void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
void (*release_pte)(unsigned long pfn);
void (*release_pmd)(unsigned long pfn);
...
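Both hunks above show the pvops pattern that makes this removal self-contained: every pv_mmu_ops hook defaults to paravirt_nop, and a guest backend overrides only the hooks it needs, so dropping alloc_pmd_clone (whose only non-nop implementation was VMI's) cannot affect other backends. A minimal userspace sketch of that default-to-nop pattern, using hypothetical names rather than the kernel's types:

#include <stdio.h>

/* Hypothetical stand-ins for the pvops pattern, not the kernel's types. */
struct mmu_ops {
	void (*alloc_pmd)(unsigned long pfn);
};

static void nop_alloc_pmd(unsigned long pfn) { }	/* bare metal: do nothing */

static void hv_alloc_pmd(unsigned long pfn)		/* a guest backend's hook */
{
	printf("tell hypervisor: pfn %lu now holds a pmd page\n", pfn);
}

/* Defaults are nops, so a native boot pays almost nothing. */
static struct mmu_ops mmu_ops = { .alloc_pmd = nop_alloc_pmd };

int main(void)
{
	mmu_ops.alloc_pmd(1);			/* nop on bare metal */
	mmu_ops.alloc_pmd = hv_alloc_pmd;	/* hypervisor detected: override */
	mmu_ops.alloc_pmd(2);
	return 0;
}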
/*
* VMI interface definition
*
* Copyright (C) 2005, VMware, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Maintained by: Zachary Amsden <zach@vmware.com>
*
*/
#include <linux/types.h>
/*
*---------------------------------------------------------------------
*
* VMI Option ROM API
*
*---------------------------------------------------------------------
*/
#define VMI_SIGNATURE 0x696d5663 /* "cVmi" */
#define PCI_VENDOR_ID_VMWARE 0x15AD
#define PCI_DEVICE_ID_VMWARE_VMI 0x0801
/*
* We use two version numbers for compatibility, with the major
* number signifying interface breakages, and the minor number
* interface extensions.
*/
#define VMI_API_REV_MAJOR 3
#define VMI_API_REV_MINOR 0
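The comment above implies the usual compatibility rule for a split version number; the check below is an illustrative sketch of that rule, not code recovered from the removed files: the major number must match exactly, while any ROM minor at or above the guest's requirement is acceptable.

#include <stdbool.h>
#include <stdio.h>

#define VMI_API_REV_MAJOR 3
#define VMI_API_REV_MINOR 0

/* Major = interface breakage, so it must match exactly;
 * minor = extensions only, so a newer minor stays compatible. */
static bool vmi_api_compatible(unsigned int rom_major, unsigned int rom_minor)
{
	return rom_major == VMI_API_REV_MAJOR && rom_minor >= VMI_API_REV_MINOR;
}

int main(void)
{
	printf("ROM 3.1: %s\n", vmi_api_compatible(3, 1) ? "ok" : "reject");
	printf("ROM 4.0: %s\n", vmi_api_compatible(4, 0) ? "ok" : "reject");
	return 0;
}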
#define VMI_CALL_CPUID 0
#define VMI_CALL_WRMSR 1
#define VMI_CALL_RDMSR 2
#define VMI_CALL_SetGDT 3
#define VMI_CALL_SetLDT 4
#define VMI_CALL_SetIDT 5
#define VMI_CALL_SetTR 6
#define VMI_CALL_GetGDT 7
#define VMI_CALL_GetLDT 8
#define VMI_CALL_GetIDT 9
#define VMI_CALL_GetTR 10
#define VMI_CALL_WriteGDTEntry 11
#define VMI_CALL_WriteLDTEntry 12
#define VMI_CALL_WriteIDTEntry 13
#define VMI_CALL_UpdateKernelStack 14
#define VMI_CALL_SetCR0 15
#define VMI_CALL_SetCR2 16
#define VMI_CALL_SetCR3 17
#define VMI_CALL_SetCR4 18
#define VMI_CALL_GetCR0 19
#define VMI_CALL_GetCR2 20
#define VMI_CALL_GetCR3 21
#define VMI_CALL_GetCR4 22
#define VMI_CALL_WBINVD 23
#define VMI_CALL_SetDR 24
#define VMI_CALL_GetDR 25
#define VMI_CALL_RDPMC 26
#define VMI_CALL_RDTSC 27
#define VMI_CALL_CLTS 28
#define VMI_CALL_EnableInterrupts 29
#define VMI_CALL_DisableInterrupts 30
#define VMI_CALL_GetInterruptMask 31
#define VMI_CALL_SetInterruptMask 32
#define VMI_CALL_IRET 33
#define VMI_CALL_SYSEXIT 34
#define VMI_CALL_Halt 35
#define VMI_CALL_Reboot 36
#define VMI_CALL_Shutdown 37
#define VMI_CALL_SetPxE 38
#define VMI_CALL_SetPxELong 39
#define VMI_CALL_UpdatePxE 40
#define VMI_CALL_UpdatePxELong 41
#define VMI_CALL_MachineToPhysical 42
#define VMI_CALL_PhysicalToMachine 43
#define VMI_CALL_AllocatePage 44
#define VMI_CALL_ReleasePage 45
#define VMI_CALL_InvalPage 46
#define VMI_CALL_FlushTLB 47
#define VMI_CALL_SetLinearMapping 48
#define VMI_CALL_SetIOPLMask 61
#define VMI_CALL_SetInitialAPState 62
#define VMI_CALL_APICWrite 63
#define VMI_CALL_APICRead 64
#define VMI_CALL_IODelay 65
#define VMI_CALL_SetLazyMode 73
/*
*---------------------------------------------------------------------
*
* MMU operation flags
*
*---------------------------------------------------------------------
*/
/* Flags used by VMI_{Allocate|Release}Page call */
#define VMI_PAGE_PAE 0x10 /* Allocate PAE shadow */
#define VMI_PAGE_CLONE 0x20 /* Clone from another shadow */
#define VMI_PAGE_ZEROED 0x40 /* Page is pre-zeroed */
/* Flags shared by Allocate|Release Page and PTE updates */
#define VMI_PAGE_PT 0x01
#define VMI_PAGE_PD 0x02
#define VMI_PAGE_PDP 0x04
#define VMI_PAGE_PML4 0x08
#define VMI_PAGE_NORMAL 0x00 /* for debugging */
/* Flags used by PTE updates */
#define VMI_PAGE_CURRENT_AS 0x10 /* implies VMI_PAGE_VA_MASK is valid */
#define VMI_PAGE_DEFER 0x20 /* may queue update until TLB inval */
#define VMI_PAGE_VA_MASK 0xfffff000
#ifdef CONFIG_X86_PAE
#define VMI_PAGE_L1 (VMI_PAGE_PT | VMI_PAGE_PAE | VMI_PAGE_ZEROED)
#define VMI_PAGE_L2 (VMI_PAGE_PD | VMI_PAGE_PAE | VMI_PAGE_ZEROED)
#else
#define VMI_PAGE_L1 (VMI_PAGE_PT | VMI_PAGE_ZEROED)
#define VMI_PAGE_L2 (VMI_PAGE_PD | VMI_PAGE_ZEROED)
#endif
/* Flags used by VMI_FlushTLB call */
#define VMI_FLUSH_TLB 0x01
#define VMI_FLUSH_GLOBAL 0x02
/*
*---------------------------------------------------------------------
*
* VMI relocation definitions for ROM call get_reloc
*
*---------------------------------------------------------------------
*/
/* VMI Relocation types */
#define VMI_RELOCATION_NONE 0
#define VMI_RELOCATION_CALL_REL 1
#define VMI_RELOCATION_JUMP_REL 2
#define VMI_RELOCATION_NOP 3
#ifndef __ASSEMBLY__
struct vmi_relocation_info {
unsigned char *eip;
unsigned char type;
unsigned char reserved[3];
};
#endif
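The relocation type returned by the ROM's get_reloc entry told the kernel how to bind each VMI_CALL_* site: leave it alone, patch in a relative call, patch in a relative jump, or nop it out. The sketch below shows only the rel32 arithmetic behind VMI_RELOCATION_CALL_REL on x86, patching a plain byte buffer rather than kernel text:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* x86 E8 (CALL rel32) encodes a displacement relative to the end of
 * the 5-byte instruction. The real patcher rewrote kernel text in
 * place; this illustrative version only fills a local buffer. */
static void patch_call_rel(uint8_t *site, const uint8_t *target)
{
	int32_t rel = (int32_t)(target - (site + 5));

	site[0] = 0xE8;
	memcpy(site + 1, &rel, sizeof(rel));
}

int main(void)
{
	uint8_t text[16] = { 0 };
	int32_t rel;

	patch_call_rel(&text[0], &text[10]);	/* pretend the target is at +10 */
	memcpy(&rel, &text[1], sizeof(rel));
	printf("opcode 0x%02x, rel32 %d\n", text[0], rel);	/* 0xe8, 5 */
	return 0;
}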
/*
*---------------------------------------------------------------------
*
* Generic ROM structures and definitions
*
*---------------------------------------------------------------------
*/
#ifndef __ASSEMBLY__
struct vrom_header {
u16 rom_signature; /* option ROM signature */
u8 rom_length; /* ROM length in 512 byte chunks */
u8 rom_entry[4]; /* 16-bit code entry point */
u8 rom_pad0; /* 4-byte align pad */
u32 vrom_signature; /* VROM identification signature */
u8 api_version_min;/* Minor version of API */
u8 api_version_maj;/* Major version of API */
u8 jump_slots; /* Number of jump slots */
u8 reserved1; /* Reserved for expansion */
u32 virtual_top; /* Hypervisor virtual address start */
u16 reserved2; /* Reserved for expansion */
u16 license_offs; /* Offset to License string */
u16 pci_header_offs;/* Offset to PCI OPROM header */
u16 pnp_header_offs;/* Offset to PnP OPROM header */
u32 rom_pad3; /* PnP reserved / VMI reserved */
u8 reserved[96]; /* Reserved for headers */
char vmi_init[8]; /* VMI_Init jump point */
char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
} __attribute__((packed));
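Given this layout, probing for a VMI ROM comes down to two signature checks: the standard 0xAA55 option ROM signature and the "cVmi" value in vrom_signature. A hedged sketch of that check against an in-memory image (the real probe scanned the legacy option ROM window; the struct here is trimmed to the fields the check reads):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VMI_SIGNATURE 0x696d5663	/* "cVmi" */

/* Leading fields of the vrom_header above, trimmed for the check. */
struct vrom_probe_header {
	uint16_t rom_signature;		/* option ROM signature, 0xAA55 */
	uint8_t  rom_length;		/* ROM length in 512-byte chunks */
	uint8_t  rom_entry[4];
	uint8_t  rom_pad0;
	uint32_t vrom_signature;	/* VROM identification signature */
} __attribute__((packed));

static int looks_like_vmi_rom(const void *image)
{
	struct vrom_probe_header h;

	memcpy(&h, image, sizeof(h));
	return h.rom_signature == 0xAA55 && h.vrom_signature == VMI_SIGNATURE;
}

int main(void)
{
	struct vrom_probe_header fake = {
		.rom_signature = 0xAA55,
		.vrom_signature = VMI_SIGNATURE,
	};
	printf("probe: %d\n", looks_like_vmi_rom(&fake));	/* 1 */
	return 0;
}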
struct pnp_header {
char sig[4];
char rev;
char size;
short next;
short res;
long devID;
unsigned short manufacturer_offset;
unsigned short product_offset;
} __attribute__((packed));
struct pci_header {
char sig[4];
short vendorID;
short deviceID;
short vpdData;
short size;
char rev;
char class;
char subclass;
char interface;
short chunks;
char rom_version_min;
char rom_version_maj;
char codetype;
char lastRom;
short reserved;
} __attribute__((packed));
/* Function prototypes for bootstrapping */
#ifdef CONFIG_VMI
extern void vmi_init(void);
extern void vmi_activate(void);
extern void vmi_bringup(void);
#else
static inline void vmi_init(void) {}
static inline void vmi_activate(void) {}
static inline void vmi_bringup(void) {}
#endif
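The stub block above is the standard compile-out idiom: with CONFIG_VMI unset, callers like setup_arch() and start_secondary() invoke empty static inlines that the compiler deletes, so no #ifdef is needed at any call site. The same idiom in a standalone sketch, with a hypothetical feature name:

#include <stdio.h>

/* CONFIG_DEMO_FEATURE stands in for a real Kconfig symbol. */
#ifdef CONFIG_DEMO_FEATURE
extern void demo_feature_init(void);
#else
static inline void demo_feature_init(void) { }	/* compiled out: no code */
#endif

int main(void)
{
	demo_feature_init();	/* unconditional call, free when disabled */
	printf("boot continues\n");
	return 0;
}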
/* State needed to start an application processor in an SMP system. */
struct vmi_ap_state {
u32 cr0;
u32 cr2;
u32 cr3;
u32 cr4;
u64 efer;
u32 eip;
u32 eflags;
u32 eax;
u32 ebx;
u32 ecx;
u32 edx;
u32 esp;
u32 ebp;
u32 esi;
u32 edi;
u16 cs;
u16 ss;
u16 ds;
u16 es;
u16 fs;
u16 gs;
u16 ldtr;
u16 gdtr_limit;
u32 gdtr_base;
u32 idtr_base;
u16 idtr_limit;
};
#endif
/*
* VMI Time wrappers
*
* Copyright (C) 2006, VMware, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Send feedback to dhecht@vmware.com
*
*/
#ifndef _ASM_X86_VMI_TIME_H
#define _ASM_X86_VMI_TIME_H
/*
* Raw VMI call indices for timer functions
*/
#define VMI_CALL_GetCycleFrequency 66
#define VMI_CALL_GetCycleCounter 67
#define VMI_CALL_SetAlarm 68
#define VMI_CALL_CancelAlarm 69
#define VMI_CALL_GetWallclockTime 70
#define VMI_CALL_WallclockUpdated 71
/* Cached VMI timer operations */
extern struct vmi_timer_ops {
u64 (*get_cycle_frequency)(void);
u64 (*get_cycle_counter)(int);
u64 (*get_wallclock)(void);
int (*wallclock_updated)(void);
void (*set_alarm)(u32 flags, u64 expiry, u64 period);
void (*cancel_alarm)(u32 flags);
} vmi_timer_ops;
/* Prototypes */
extern void __init vmi_time_init(void);
extern unsigned long vmi_get_wallclock(void);
extern int vmi_set_wallclock(unsigned long now);
extern unsigned long long vmi_sched_clock(void);
extern unsigned long vmi_tsc_khz(void);
#ifdef CONFIG_X86_LOCAL_APIC
extern void __devinit vmi_time_bsp_init(void);
extern void __devinit vmi_time_ap_init(void);
#endif
/*
* When run under a hypervisor, a vcpu is always in one of three states:
* running, halted, or ready. The vcpu is in the 'running' state if it
* is executing. When the vcpu executes the halt interface, the vcpu
* enters the 'halted' state and remains halted until there is some work
* pending for the vcpu (e.g. an alarm expires, host I/O completes on
* behalf of virtual I/O). At this point, the vcpu enters the 'ready'
* state (waiting for the hypervisor to reschedule it). Finally, at any
* time when the vcpu is not in the 'running' or 'halted' state, it is
* in the 'ready' state.
*
* Real time advances while the vcpu is 'running', 'ready', or
* 'halted'. Stolen time is the time in which the vcpu is in the
* 'ready' state. Available time is the remaining time -- the vcpu is
* either 'running' or 'halted'.
*
* All three views of time are accessible through the VMI cycle
* counters.
*/
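Because the three states are exhaustive and disjoint, the counters satisfy real = available + stolen, which is what lets a guest account for stolen time from the inside. A small sketch of that identity over a sampling interval, with invented counter values:

#include <stdint.h>
#include <stdio.h>

/* Stolen share over an interval is d(stolen) / d(real); the identity
 * real = available + stolen means two counters determine the third. */
static double stolen_percent(uint64_t real0, uint64_t stolen0,
			     uint64_t real1, uint64_t stolen1)
{
	uint64_t dreal = real1 - real0;

	return dreal ? 100.0 * (double)(stolen1 - stolen0) / (double)dreal : 0.0;
}

int main(void)
{
	/* Over 1,000,000 real cycles, the hypervisor stole 250,000. */
	printf("stolen: %.1f%%\n", stolen_percent(0, 0, 1000000, 250000));
	return 0;
}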
/* The cycle counters. */
#define VMI_CYCLES_REAL 0
#define VMI_CYCLES_AVAILABLE 1
#define VMI_CYCLES_STOLEN 2
/* The alarm interface 'flags' bits */
#define VMI_ALARM_COUNTERS 2
#define VMI_ALARM_COUNTER_MASK 0x000000ff
#define VMI_ALARM_WIRED_IRQ0 0x00000000
#define VMI_ALARM_WIRED_LVTT 0x00010000
#define VMI_ALARM_IS_ONESHOT 0x00000000
#define VMI_ALARM_IS_PERIODIC 0x00000100
#define CONFIG_VMI_ALARM_HZ 100
#endif /* _ASM_X86_VMI_TIME_H */
...@@ -95,7 +95,6 @@ obj-$(CONFIG_AMD_NB) += amd_nb.o
obj-$(CONFIG_DEBUG_RODATA_TEST) += test_rodata.o
obj-$(CONFIG_DEBUG_NX_TEST) += test_nx.o
obj-$(CONFIG_VMI) += vmi_32.o vmiclock_32.o
obj-$(CONFIG_KVM_GUEST) += kvm.o
obj-$(CONFIG_KVM_CLOCK) += kvmclock.o
obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch_$(BITS).o
...
...@@ -413,7 +413,6 @@ struct pv_mmu_ops pv_mmu_ops = {
.alloc_pte = paravirt_nop,
.alloc_pmd = paravirt_nop,
.alloc_pmd_clone = paravirt_nop,
.alloc_pud = paravirt_nop,
.release_pte = paravirt_nop,
.release_pmd = paravirt_nop,
...
...@@ -83,7 +83,6 @@
#include <asm/dmi.h>
#include <asm/io_apic.h>
#include <asm/ist.h>
#include <asm/vmi.h>
#include <asm/setup_arch.h>
#include <asm/bios_ebda.h>
#include <asm/cacheflush.h>
...@@ -691,10 +690,10 @@ void __init setup_arch(char **cmdline_p)
printk(KERN_INFO "Command line: %s\n", boot_command_line);
#endif
/* VMI may relocate the fixmap; do this before touching ioremap area */
vmi_init();

/* OFW also may relocate the fixmap */
/*
* If we have OLPC OFW, we might end up relocating the fixmap due to
* reserve_top(), so do this before touching the ioremap area.
*/
olpc_ofw_detect();

early_trap_init();
...@@ -795,9 +794,6 @@ void __init setup_arch(char **cmdline_p)
x86_report_nx();
/* Must be before kernel pagetables are setup */
vmi_activate();
/* after early param, so could get panic from serial */
reserve_early_setup_data();
...
...@@ -63,7 +63,6 @@
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/mwait.h>
#include <asm/vmi.h>
#include <asm/apic.h>
#include <asm/setup.h>
#include <asm/uv/uv.h>
...@@ -312,7 +311,6 @@ notrace static void __cpuinit start_secondary(void *unused)
__flush_tlb_all();
#endif
vmi_bringup();
cpu_init();
preempt_disable();
smp_callin();
...
/*
* VMI paravirtual timer support routines.
*
* Copyright (C) 2007, VMware, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <asm/vmi.h>
#include <asm/vmi_time.h>
#include <asm/apicdef.h>
#include <asm/apic.h>
#include <asm/timer.h>
#include <asm/i8253.h>
#include <asm/irq_vectors.h>
#define VMI_ONESHOT (VMI_ALARM_IS_ONESHOT | VMI_CYCLES_REAL | vmi_get_alarm_wiring())
#define VMI_PERIODIC (VMI_ALARM_IS_PERIODIC | VMI_CYCLES_REAL | vmi_get_alarm_wiring())
static DEFINE_PER_CPU(struct clock_event_device, local_events);
static inline u32 vmi_counter(u32 flags)
{
/* Given VMI_ONESHOT or VMI_PERIODIC, return the corresponding
* cycle counter. */
return flags & VMI_ALARM_COUNTER_MASK;
}
/* paravirt_ops.get_wallclock = vmi_get_wallclock */
unsigned long vmi_get_wallclock(void)
{
unsigned long long wallclock;
wallclock = vmi_timer_ops.get_wallclock(); // nsec
(void)do_div(wallclock, 1000000000); // sec
return wallclock;
}
/* paravirt_ops.set_wallclock = vmi_set_wallclock */
int vmi_set_wallclock(unsigned long now)
{
return 0;
}
/* paravirt_ops.sched_clock = vmi_sched_clock */
unsigned long long vmi_sched_clock(void)
{
return cycles_2_ns(vmi_timer_ops.get_cycle_counter(VMI_CYCLES_AVAILABLE));
}
/* x86_platform.calibrate_tsc = vmi_tsc_khz */
unsigned long vmi_tsc_khz(void)
{
unsigned long long khz;
khz = vmi_timer_ops.get_cycle_frequency();
(void)do_div(khz, 1000);
return khz;
}
static inline unsigned int vmi_get_timer_vector(void)
{
return IRQ0_VECTOR;
}
/** vmi clockchip */
#ifdef CONFIG_X86_LOCAL_APIC
static unsigned int startup_timer_irq(unsigned int irq)
{
unsigned long val = apic_read(APIC_LVTT);
apic_write(APIC_LVTT, vmi_get_timer_vector());
return (val & APIC_SEND_PENDING);
}
static void mask_timer_irq(unsigned int irq)
{
unsigned long val = apic_read(APIC_LVTT);
apic_write(APIC_LVTT, val | APIC_LVT_MASKED);
}
static void unmask_timer_irq(unsigned int irq)
{
unsigned long val = apic_read(APIC_LVTT);
apic_write(APIC_LVTT, val & ~APIC_LVT_MASKED);
}
static void ack_timer_irq(unsigned int irq)
{
ack_APIC_irq();
}
static struct irq_chip vmi_chip __read_mostly = {
.name = "VMI-LOCAL",
.startup = startup_timer_irq,
.mask = mask_timer_irq,
.unmask = unmask_timer_irq,
.ack = ack_timer_irq
};
#endif
/** vmi clockevent */
#define VMI_ALARM_WIRED_IRQ0 0x00000000
#define VMI_ALARM_WIRED_LVTT 0x00010000
static int vmi_wiring = VMI_ALARM_WIRED_IRQ0;
static inline int vmi_get_alarm_wiring(void)
{
return vmi_wiring;
}
static void vmi_timer_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
cycle_t now, cycles_per_hz;
BUG_ON(!irqs_disabled());
switch (mode) {
case CLOCK_EVT_MODE_ONESHOT:
case CLOCK_EVT_MODE_RESUME:
break;
case CLOCK_EVT_MODE_PERIODIC:
cycles_per_hz = vmi_timer_ops.get_cycle_frequency();
(void)do_div(cycles_per_hz, HZ);
now = vmi_timer_ops.get_cycle_counter(vmi_counter(VMI_PERIODIC));
vmi_timer_ops.set_alarm(VMI_PERIODIC, now, cycles_per_hz);
break;
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_SHUTDOWN:
switch (evt->mode) {
case CLOCK_EVT_MODE_ONESHOT:
vmi_timer_ops.cancel_alarm(VMI_ONESHOT);
break;
case CLOCK_EVT_MODE_PERIODIC:
vmi_timer_ops.cancel_alarm(VMI_PERIODIC);
break;
default:
break;
}
break;
default:
break;
}
}
static int vmi_timer_next_event(unsigned long delta,
struct clock_event_device *evt)
{
/* Unfortunately, set_next_event interface only passes relative
* expiry, but we want absolute expiry. It'd be better if we
* were passed an absolute expiry, since a bunch of time may
* have been stolen between the time the delta is computed and
* when we set the alarm below. */
cycle_t now = vmi_timer_ops.get_cycle_counter(vmi_counter(VMI_ONESHOT));
BUG_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);
vmi_timer_ops.set_alarm(VMI_ONESHOT, now + delta, 0);
return 0;
}
static struct clock_event_device vmi_clockevent = {
.name = "vmi-timer",
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.shift = 22,
.set_mode = vmi_timer_set_mode,
.set_next_event = vmi_timer_next_event,
.rating = 1000,
.irq = 0,
};
static irqreturn_t vmi_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = &__get_cpu_var(local_events);
evt->event_handler(evt);
return IRQ_HANDLED;
}
static struct irqaction vmi_clock_action = {
.name = "vmi-timer",
.handler = vmi_timer_interrupt,
.flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER,
};
static void __devinit vmi_time_init_clockevent(void)
{
cycle_t cycles_per_msec;
struct clock_event_device *evt;
int cpu = smp_processor_id();
evt = &__get_cpu_var(local_events);
/* Use cycles_per_msec since div_sc params are 32-bits. */
cycles_per_msec = vmi_timer_ops.get_cycle_frequency();
(void)do_div(cycles_per_msec, 1000);
memcpy(evt, &vmi_clockevent, sizeof(*evt));
/* Must pick .shift such that .mult fits in 32 bits. Choosing
* .shift to be 22 allows up to 2^(32-22) cycles per nanosecond
* before .mult overflows. */
evt->mult = div_sc(cycles_per_msec, NSEC_PER_MSEC, evt->shift);
/* Upper bound is clockevent's use of ulong for cycle deltas. */
evt->max_delta_ns = clockevent_delta2ns(ULONG_MAX, evt);
evt->min_delta_ns = clockevent_delta2ns(1, evt);
evt->cpumask = cpumask_of(cpu);
printk(KERN_WARNING "vmi: registering clock event %s. mult=%u shift=%u\n",
evt->name, evt->mult, evt->shift);
clockevents_register_device(evt);
}
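For reference, div_sc(cycles_per_msec, NSEC_PER_MSEC, shift) computes (cycles_per_msec << shift) / NSEC_PER_MSEC, and the clockevent core then converts a nanosecond delta to cycles as (ns * mult) >> shift. A standalone sketch of that arithmetic, assuming a 2.4 GHz cycle counter:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int shift = 22;
	uint64_t cycles_per_msec = 2400000;	/* assumed 2.4 GHz counter */
	uint32_t mult = (uint32_t)((cycles_per_msec << shift) / 1000000ULL);

	uint64_t delta_ns = 500000;		/* alarm 0.5 ms out */
	uint64_t cycles = (delta_ns * mult) >> shift;

	/* ~1200000 cycles, i.e. 0.5 ms at 2.4 GHz; mult still fits 32 bits */
	printf("mult=%u cycles=%llu\n", mult, (unsigned long long)cycles);
	return 0;
}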
void __init vmi_time_init(void)
{
unsigned int cpu;
/* Disable PIT: BIOSes start PIT CH0 with 18.2Hz periodic. */
outb_pit(0x3a, PIT_MODE); /* binary, mode 5, LSB/MSB, ch 0 */
vmi_time_init_clockevent();
setup_irq(0, &vmi_clock_action);
for_each_possible_cpu(cpu)
per_cpu(vector_irq, cpu)[vmi_get_timer_vector()] = 0;
}
#ifdef CONFIG_X86_LOCAL_APIC
void __devinit vmi_time_bsp_init(void)
{
/*
* On APIC systems, we want local timers to fire on each cpu. We do
* this by programming LVTT to deliver timer events to the IRQ handler
* for IRQ-0, since we can't re-use the APIC local timer handler
* without interfering with that code.
*/
clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
local_irq_disable();
#ifdef CONFIG_SMP
/*
* XXX handle_percpu_irq only defined for SMP; we need to switch over
* to using it, since this is a local interrupt, which each CPU must
* handle individually without locking out or dropping simultaneous
* local timers on other CPUs. We also don't want to trigger the
* quirk workaround code for interrupts which gets invoked from
* handle_percpu_irq via eoi, so we use our own IRQ chip.
*/
set_irq_chip_and_handler_name(0, &vmi_chip, handle_percpu_irq, "lvtt");
#else
set_irq_chip_and_handler_name(0, &vmi_chip, handle_edge_irq, "lvtt");
#endif
vmi_wiring = VMI_ALARM_WIRED_LVTT;
apic_write(APIC_LVTT, vmi_get_timer_vector());
local_irq_enable();
clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);
}
void __devinit vmi_time_ap_init(void)
{
vmi_time_init_clockevent();
apic_write(APIC_LVTT, vmi_get_timer_vector());
}
#endif
/** vmi clocksource */
static struct clocksource clocksource_vmi;
static cycle_t read_real_cycles(struct clocksource *cs)
{
cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
return max(ret, clocksource_vmi.cycle_last);
}
static struct clocksource clocksource_vmi = {
.name = "vmi-timer",
.rating = 450,
.read = read_real_cycles,
.mask = CLOCKSOURCE_MASK(64),
.mult = 0, /* to be set */
.shift = 22,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static int __init init_vmi_clocksource(void)
{
cycle_t cycles_per_msec;
if (!vmi_timer_ops.get_cycle_frequency)
return 0;
/* Use khz2mult rather than hz2mult since hz arg is only 32-bits. */
cycles_per_msec = vmi_timer_ops.get_cycle_frequency();
(void)do_div(cycles_per_msec, 1000);
/* Note that clocksource.{mult, shift} converts in the opposite
* direction from clockevents. */
clocksource_vmi.mult = clocksource_khz2mult(cycles_per_msec,
clocksource_vmi.shift);
printk(KERN_WARNING "vmi: registering clock source khz=%lld\n", cycles_per_msec);
return clocksource_register(&clocksource_vmi);
}
module_init(init_vmi_clocksource);
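As the comment in init_vmi_clocksource() notes, a clocksource's mult/shift converts the other way, cycles to nanoseconds: clocksource_khz2mult() is essentially (NSEC_PER_MSEC << shift) / khz (with rounding), after which ns = (cycles * mult) >> shift. A matching sketch under the same assumed 2.4 GHz counter:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int shift = 22;
	uint64_t khz = 2400000;		/* 2.4 GHz; kHz == cycles per msec */
	uint32_t mult = (uint32_t)((1000000ULL << shift) / khz);

	uint64_t cycles = 1200000;	/* about 0.5 ms worth of cycles */
	printf("ns=%llu\n", (unsigned long long)((cycles * mult) >> shift));
	return 0;
}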
...@@ -110,10 +110,6 @@ static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
swapper_pg_dir + KERNEL_PGD_BOUNDARY,
KERNEL_PGD_PTRS);
paravirt_alloc_pmd_clone(__pa(pgd) >> PAGE_SHIFT,
__pa(swapper_pg_dir) >> PAGE_SHIFT,
KERNEL_PGD_BOUNDARY,
KERNEL_PGD_PTRS);
}
/* list required to sync kernel mapping updates */
...
...@@ -1969,7 +1969,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
.alloc_pte = xen_alloc_pte_init,
.release_pte = xen_release_pte_init,
.alloc_pmd = xen_alloc_pmd_init,
.alloc_pmd_clone = paravirt_nop,
.release_pmd = xen_release_pmd_init,
#ifdef CONFIG_X86_64
...