Commit d26dbc5c authored by FUJITA Tomonori, committed by Ingo Molnar

iommu: export iommu_area_reserve helper function

x86 has set_bit_string(), which does exactly the same thing as
set_bit_area() in lib/iommu-helper.c.

This patch exports set_bit_area() in lib/iommu-helper.c as
iommu_area_reserve() and converts GART, Calgary, and AMD IOMMU to use it.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 832a90c3
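
For reference, the helper being exported simply marks a contiguous run of bits in an IOMMU allocation bitmap as in use. Below is a minimal sketch of its semantics, following the loop head visible in the lib/iommu-helper.c hunk further down; the __set_bit() body is filled in here as an assumption, since the hunk only shows the start of the loop:

	void iommu_area_reserve(unsigned long *map, unsigned long i, int len)
	{
		unsigned long end = i + len;

		/* Mark each slot in [i, i + len) as allocated in the bitmap. */
		while (i < end) {
			__set_bit(i, map);	/* non-atomic; callers serialize bitmap access */
			i++;
		}
	}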
@@ -572,7 +572,7 @@ static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
 	if (start_page + pages > last_page)
 		pages = last_page - start_page;
 
-	set_bit_string(dom->bitmap, start_page, pages);
+	iommu_area_reserve(dom->bitmap, start_page, pages);
 }
 
 static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
@@ -261,7 +261,7 @@ static void iommu_range_reserve(struct iommu_table *tbl,
 			badbit, tbl, start_addr, npages);
 	}
 
-	set_bit_string(tbl->it_map, index, npages);
+	iommu_area_reserve(tbl->it_map, index, npages);
 
 	spin_unlock_irqrestore(&tbl->it_lock, flags);
 }
@@ -827,7 +827,7 @@ void __init gart_iommu_init(void)
 	 * Out of IOMMU space handling.
 	 * Reserve some invalid pages at the beginning of the GART.
 	 */
-	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
+	iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
 
 	agp_memory_reserved = iommu_size;
 	printk(KERN_INFO
@@ -11,6 +11,7 @@ static inline unsigned long iommu_device_max_index(unsigned long size,
 extern int iommu_is_span_boundary(unsigned int index, unsigned int nr,
 				  unsigned long shift,
 				  unsigned long boundary_size);
+extern void iommu_area_reserve(unsigned long *map, unsigned long i, int len);
 extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
 				      unsigned long start, unsigned int nr,
 				      unsigned long shift,
@@ -30,8 +30,7 @@ static unsigned long find_next_zero_area(unsigned long *map,
 	return index;
 }
 
-static inline void set_bit_area(unsigned long *map, unsigned long i,
-				int len)
+void iommu_area_reserve(unsigned long *map, unsigned long i, int len)
 {
 	unsigned long end = i + len;
 	while (i < end) {
@@ -64,7 +63,7 @@ unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
 			start = index + 1;
 			goto again;
 		}
-		set_bit_area(map, index, nr);
+		iommu_area_reserve(map, index, nr);
 	}
 	return index;
 }