Commit 4f7dbc7f authored by Linus Torvalds

Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: more general identifier for Phoenix BIOS
  AMD IOMMU: check for next_bit also in unmapped area
  AMD IOMMU: fix fullflush comparison length
  AMD IOMMU: enable device isolation per default
  AMD IOMMU: add parameter to disable device isolation
  x86, PEBS/DS: fix code flow in ds_request()
  x86: add rdtsc barrier to TSC sync check
  xen: fix scrub_page()
  x86: fix es7000 compiling
  x86, bts: fix unlock problem in ds.c
  x86, voyager: fix smp generic helper voyager breakage
  x86: move iomap.h to the new include location
parents 9f92f471 73f56c0d
@@ -294,7 +294,9 @@ and is between 256 and 4096 characters. It is defined in the file
 			Possible values are:
 			isolate - enable device isolation (each device, as far
 				  as possible, will get its own protection
-				  domain)
+				  domain) [default]
+			share - put every device behind one IOMMU into the
+				same protection domain
 			fullflush - enable flushing of IO/TLB entries when
 				    they are unmapped. Otherwise they are
 				    flushed before they will be reused, which
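For reference, these are options to the amd_iommu= boot parameter documented in this entry; two illustrative command lines (not part of the patch):

    amd_iommu=fullflush        flush IO/TLB entries as they are unmapped
    amd_iommu=share            opt back out of the new per-device isolation default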
@@ -167,9 +167,12 @@ config GENERIC_PENDING_IRQ
 config X86_SMP
 	bool
 	depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64)
-	select USE_GENERIC_SMP_HELPERS
 	default y
 
+config USE_GENERIC_SMP_HELPERS
+	def_bool y
+	depends on SMP
+
 config X86_32_SMP
 	def_bool y
 	depends on X86_32 && SMP
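The point of this Kconfig change: X86_SMP excludes Voyager through its !X86_VOYAGER dependency, so selecting USE_GENERIC_SMP_HELPERS from there never covered Voyager builds. Keying the option directly off SMP lets Voyager use the generic cross-CPU call helpers as well, paired with the Voyager-specific IPI callbacks added further below.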
@@ -537,7 +537,7 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
 	address >>= PAGE_SHIFT;
 
 	iommu_area_free(dom->bitmap, address, pages);
 
-	if (address >= dom->next_bit)
+	if (address + pages >= dom->next_bit)
 		dom->need_flush = true;
 }
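The logic behind the new check, as a minimal standalone sketch (the helper name is illustrative, not the kernel's): next_bit marks where the next address search resumes, so a range freed entirely below it is only handed out again after a wraparound, and the wraparound path already forces an IO/TLB flush. A freed range that reaches next_bit can be reused before any flush, so it must set need_flush - hence the end of the freed range, not just its start, has to be compared.

#include <stdbool.h>

/*
 * Illustration only: the freed range is [address, address + pages) in
 * bitmap pages; next_bit is where the next allocation search resumes.
 */
static bool freed_range_needs_flush(unsigned long address,
                                    unsigned int pages,
                                    unsigned long next_bit)
{
        /* Reaches or passes next_bit: may be reused before the next flush. */
        return address + pages >= next_bit;
}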
@@ -121,7 +121,7 @@ u16 amd_iommu_last_bdf;			/* largest PCI device id we have
 LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
 					   we find in ACPI */
 unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */
-int amd_iommu_isolate;			/* if 1, device isolation is enabled */
+int amd_iommu_isolate = 1;		/* if 1, device isolation is enabled */
 bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */
 
 LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
@@ -1213,7 +1213,9 @@ static int __init parse_amd_iommu_options(char *str)
 	for (; *str; ++str) {
 		if (strncmp(str, "isolate", 7) == 0)
 			amd_iommu_isolate = 1;
-		if (strncmp(str, "fullflush", 11) == 0)
+		if (strncmp(str, "share", 5) == 0)
+			amd_iommu_isolate = 0;
+		if (strncmp(str, "fullflush", 9) == 0)
 			amd_iommu_unmap_flush = true;
 	}
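Why the comparison length matters: "fullflush" is nine characters, and with n = 11 the literal's terminating NUL takes part in the comparison, so the option only matched when nothing followed it in the amd_iommu= string. A minimal userspace illustration (not kernel code):

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *opts = "fullflush,isolate";   /* fullflush is not the last token */

        /* n = 11 drags the literal's NUL into the comparison: no match. */
        printf("n=11: %d\n", strncmp(opts, "fullflush", 11) == 0);
        /* n = 9 is a plain prefix match: matches. */
        printf("n=9:  %d\n", strncmp(opts, "fullflush", 9) == 0);
        return 0;
}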
@@ -236,17 +236,33 @@ static inline struct ds_context *ds_alloc_context(struct task_struct *task)
 	struct ds_context *context = *p_context;
 
 	if (!context) {
+		spin_unlock(&ds_lock);
+
 		context = kzalloc(sizeof(*context), GFP_KERNEL);
-		if (!context)
+		if (!context) {
+			spin_lock(&ds_lock);
 			return NULL;
+		}
 
 		context->ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL);
 		if (!context->ds) {
 			kfree(context);
+			spin_lock(&ds_lock);
 			return NULL;
 		}
 
+		spin_lock(&ds_lock);
+		/*
+		 * Check for race - another CPU could have allocated
+		 * it meanwhile:
+		 */
+		if (*p_context) {
+			kfree(context->ds);
+			kfree(context);
+			return *p_context;
+		}
+
 		*p_context = context;
 
 		context->this = p_context;
@@ -384,14 +400,15 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
 	spin_lock(&ds_lock);
 
-	if (!check_tracer(task))
-		return -EPERM;
-
 	error = -ENOMEM;
 	context = ds_alloc_context(task);
 	if (!context)
 		goto out_unlock;
 
+	error = -EPERM;
+	if (!check_tracer(task))
+		goto out_unlock;
+
 	error = -EALREADY;
 	if (context->owner[qual] == current)
 		goto out_unlock;
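Two things are fixed here. In ds_alloc_context(), kzalloc(GFP_KERNEL) may sleep, so it must not run under the ds_lock spinlock; the lock is therefore dropped around the allocations, retaken, and the slot rechecked in case another CPU installed a context in the meantime. In ds_request(), the old check_tracer() failure path returned -EPERM while still holding ds_lock; moving the check after the allocation lets it share the out_unlock exit. The allocation pattern, as a generic sketch (names are illustrative, not the kernel's):

#include <linux/slab.h>
#include <linux/spinlock.h>

struct thing { int dummy; };

/* Drop the lock to allocate, retake it, and recheck for a racing winner. */
static struct thing *get_or_create(struct thing **slot, spinlock_t *lock)
{
        struct thing *new;

        spin_lock(lock);
        if (*slot) {
                spin_unlock(lock);
                return *slot;
        }
        spin_unlock(lock);      /* GFP_KERNEL may sleep; never allocate under a spinlock */

        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return NULL;

        spin_lock(lock);
        if (*slot) {            /* another CPU won the race while we slept */
                spin_unlock(lock);
                kfree(new);
                return *slot;
        }
        *slot = new;
        spin_unlock(lock);

        return new;
}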
@@ -250,31 +250,24 @@ int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
 {
 	struct acpi_table_header *header = NULL;
 	int i = 0;
-	acpi_size tbl_size;
 
-	while (ACPI_SUCCESS(acpi_get_table_with_size("OEM1", i++, &header, &tbl_size))) {
+	while (ACPI_SUCCESS(acpi_get_table("OEM1", i++, &header))) {
 		if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) {
 			struct oem_table *t = (struct oem_table *)header;
 
 			oem_addrX = t->OEMTableAddr;
 			oem_size = t->OEMTableSize;
-			early_acpi_os_unmap_memory(header, tbl_size);
 
 			*oem_addr = (unsigned long)__acpi_map_table(oem_addrX,
 								    oem_size);
 			return 0;
 		}
-		early_acpi_os_unmap_memory(header, tbl_size);
 	}
 
 	return -1;
 }
 
 void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr)
 {
-	if (!oem_addr)
-		return;
-
-	__acpi_unmap_table((char *)oem_addr, oem_size);
 }
 #endif
@@ -764,7 +764,7 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
 		.callback = dmi_low_memory_corruption,
 		.ident = "Phoenix BIOS",
 		.matches = {
-			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
+			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies"),
 		},
 	},
 #endif
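DMI table matching is substring-based (the kernel's dmi_matches() effectively runs strstr() against the DMI identification strings), so dropping ", LTD" makes the entry cover the different spellings Phoenix uses for its vendor string. A trivial userspace illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *vendors[] = { "Phoenix Technologies, LTD", "Phoenix Technologies LTD" };

        for (int i = 0; i < 2; i++)
                printf("%-28s -> %s\n", vendors[i],
                       strstr(vendors[i], "Phoenix Technologies") ? "match" : "no match");
        return 0;
}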
@@ -46,7 +46,9 @@ static __cpuinit void check_tsc_warp(void)
 	cycles_t start, now, prev, end;
 	int i;
 
+	rdtsc_barrier();
 	start = get_cycles();
+	rdtsc_barrier();
 	/*
 	 * The measurement runs for 20 msecs:
 	 */
@@ -61,7 +63,9 @@ static __cpuinit void check_tsc_warp(void)
 		 */
 		__raw_spin_lock(&sync_lock);
 		prev = last_tsc;
+		rdtsc_barrier();
 		now = get_cycles();
+		rdtsc_barrier();
 		last_tsc = now;
 		__raw_spin_unlock(&sync_lock);
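RDTSC is not a serializing instruction: the CPU may execute it earlier or later than program order relative to the surrounding code, which can make the warp check report TSC skew that is not really there, or miss real skew. Bracketing get_cycles() with rdtsc_barrier() - which on kernels of this vintage expands to an LFENCE or MFENCE chosen via CPU-feature alternatives - pins the read in place. A userspace sketch of the same idea (gcc on x86-64, illustrative only):

#include <stdint.h>
#include <stdio.h>

static inline uint64_t fenced_rdtsc(void)
{
        uint32_t lo, hi;

        __asm__ __volatile__("lfence" ::: "memory");  /* earlier work must finish before the read */
        __asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
        __asm__ __volatile__("lfence" ::: "memory");  /* the read must finish before later work starts */

        return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
        printf("tsc: %llu\n", (unsigned long long)fenced_rdtsc());
        return 0;
}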
@@ -7,6 +7,7 @@
  * This file provides all the same external entries as smp.c but uses
  * the voyager hal to provide the functionality
  */
+#include <linux/cpu.h>
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/kernel_stat.h>
@@ -1790,6 +1791,17 @@ void __init smp_setup_processor_id(void)
 	x86_write_percpu(cpu_number, hard_smp_processor_id());
 }
 
+static void voyager_send_call_func(cpumask_t callmask)
+{
+	__u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id());
+	send_CPI(mask, VIC_CALL_FUNCTION_CPI);
+}
+
+static void voyager_send_call_func_single(int cpu)
+{
+	send_CPI(1 << cpu, VIC_CALL_FUNCTION_SINGLE_CPI);
+}
+
 struct smp_ops smp_ops = {
 	.smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu,
 	.smp_prepare_cpus = voyager_smp_prepare_cpus,
@@ -1799,6 +1811,6 @@ struct smp_ops smp_ops = {
 	.smp_send_stop = voyager_smp_send_stop,
 	.smp_send_reschedule = voyager_smp_send_reschedule,
 
-	.send_call_func_ipi = native_send_call_func_ipi,
-	.send_call_func_single_ipi = native_send_call_func_single_ipi,
+	.send_call_func_ipi = voyager_send_call_func,
+	.send_call_func_single_ipi = voyager_send_call_func_single,
 };
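The new callbacks build the Voyager CPI destination mask directly from the cpumask: cpus_addr(callmask)[0] is the low word of the mask, and the sender's own bit is cleared so it does not interrupt itself. A toy illustration of that mask arithmetic (plain C, values made up):

#include <stdio.h>

int main(void)
{
        unsigned int callmask = 0x0f;             /* CPUs 0-3 requested */
        int self = 2;                             /* pretend this is smp_processor_id() */
        unsigned int mask = callmask & ~(1u << self);

        printf("CPI mask: 0x%x\n", mask);         /* 0xb: CPUs 0, 1 and 3 */
        return 0;
}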
@@ -122,14 +122,7 @@ static struct timer_list balloon_timer;
 
 static void scrub_page(struct page *page)
 {
 #ifdef CONFIG_XEN_SCRUB_PAGES
-	if (PageHighMem(page)) {
-		void *v = kmap(page);
-		clear_page(v);
-		kunmap(v);
-	} else {
-		void *v = page_address(page);
-		clear_page(v);
-	}
+	clear_highpage(page);
 #endif
 }
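Besides being shorter, this fixes a real bug in the open-coded highmem path: kunmap() takes the struct page that was passed to kmap(), not the virtual address kmap() returned, so kunmap(v) was wrong. clear_highpage() handles both the highmem and lowmem cases; roughly, on kernels of this era (an illustration, see include/linux/highmem.h for the real helper):

#include <linux/highmem.h>

/* Sketch of what clear_highpage() amounts to on a 2.6.28-era kernel. */
static inline void clear_highpage_sketch(struct page *page)
{
        void *kaddr = kmap_atomic(page, KM_USER0);   /* works for highmem and lowmem pages */

        clear_page(kaddr);
        kunmap_atomic(kaddr, KM_USER0);              /* the atomic variant takes the address;
                                                      * kunmap() takes the struct page */
}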