Commit a9c55b3b authored by Keshavamurthy, Anil S and committed by Linus Torvalds

Intel IOMMU: clflush_cache_range now takes size param

Introduce a size parameter for clflush_cache_range() so callers can flush an arbitrary byte range instead of a fixed PAGE_SIZE.
Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Muli Ben-Yehuda <muli@il.ibm.com>
Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Ashok Raj <ashok.raj@intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Greg KH <greg@kroah.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 994a65e2
@@ -61,10 +61,10 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
 	return base;
 }
 
-static void cache_flush_page(void *adr)
+void clflush_cache_range(void *adr, int size)
 {
 	int i;
-	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
 		clflush(adr+i);
 }
 
@@ -80,7 +80,7 @@ static void flush_kernel_map(void *arg)
 		asm volatile("wbinvd" ::: "memory");
 	else list_for_each_entry(pg, l, lru) {
 		void *adr = page_address(pg);
-		cache_flush_page(adr);
+		clflush_cache_range(adr, PAGE_SIZE);
 	}
 	__flush_tlb_all();
 }
@@ -27,6 +27,7 @@
 void global_flush_tlb(void);
 int change_page_attr(struct page *page, int numpages, pgprot_t prot);
 int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot);
+void clflush_cache_range(void *addr, int size);
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 /* internal debugging function */
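A minimal caller sketch, assuming a hypothetical IOMMU-style consumer: the struct dma_pte and flush_dma_pte names below are illustrative and not part of this commit. With the new size parameter, such a caller flushes only the cache lines covering a small descriptor instead of a full PAGE_SIZE worth of lines.

#include <linux/types.h>
#include <asm/cacheflush.h>	/* clflush_cache_range() declaration added above */

/* Illustrative 64-bit descriptor; any small in-memory structure works. */
struct dma_pte {
	u64 val;
};

/*
 * Flush just the cache lines that back one descriptor.  Before this
 * change the only helper available flushed an entire page.
 */
static void flush_dma_pte(struct dma_pte *pte)
{
	clflush_cache_range(pte, sizeof(*pte));
}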