Commit 7b22ff53 authored by FUJITA Tomonori, committed by Ingo Molnar

x86 gart: allocate size-aligned address for alloc_coherent, v2

This patch changes the GART IOMMU to return a size-aligned address from
dma_alloc_coherent, as DMA-mapping.txt requires:

    The cpu return address and the DMA bus master address are both
    guaranteed to be aligned to the smallest PAGE_SIZE order which
    is greater than or equal to the requested size.  This invariant
    exists (for example) to guarantee that if you allocate a chunk
    which is smaller than or equal to 64 kilobytes, the extent of the
    buffer you receive will not cross a 64K boundary.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent f3efbe58
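
To make the quoted invariant concrete, here is a minimal stand-alone sketch, assuming a 4KB page size. get_order() below re-creates the kernel helper of the same name in user space, and main() is purely illustrative; it shows how a requested size turns into the align_mask that gart_map_simple() computes in the diff below:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Re-creation of the kernel's get_order(): the smallest order such
 * that (PAGE_SIZE << order) >= size.  Assumes size > 0. */
static int get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long size = 20 * 1024;	/* e.g. a 20KB coherent request */
	unsigned long align_mask = (1UL << get_order(size)) - 1;

	/* order 3 -> the mapping starts on an 8-page (32KB) boundary,
	 * so a buffer of up to 32KB cannot straddle a 32KB boundary. */
	printf("order=%d align_mask=%#lx alignment=%lu bytes\n",
	       get_order(size), align_mask, PAGE_SIZE << get_order(size));
	return 0;
}

Note that the mask is expressed in IOMMU pages, not bytes: align_mask = 0x7 means the allocated GART offset must be a multiple of 8 pages.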
@@ -82,7 +82,8 @@ AGPEXTERN __u32 *agp_gatt_table;
 static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
 static int need_flush;		/* global flush state. set for each gart wrap */
 
-static unsigned long alloc_iommu(struct device *dev, int size)
+static unsigned long alloc_iommu(struct device *dev, int size,
+				 unsigned long align_mask)
 {
 	unsigned long offset, flags;
 	unsigned long boundary_size;
@@ -95,11 +96,12 @@ static unsigned long alloc_iommu(struct device *dev, int size)
 
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
 	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
-				  size, base_index, boundary_size, 0);
+				  size, base_index, boundary_size, align_mask);
 	if (offset == -1) {
 		need_flush = 1;
 		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
-					  size, base_index, boundary_size, 0);
+					  size, base_index, boundary_size,
+					  align_mask);
 	}
 	if (offset != -1) {
 		next_bit = offset+size;
@@ -236,10 +238,10 @@ nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
  * Caller needs to check if the iommu is needed and flush.
  */
 static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
-				size_t size, int dir)
+				size_t size, int dir, unsigned long align_mask)
 {
 	unsigned long npages = iommu_num_pages(phys_mem, size);
-	unsigned long iommu_page = alloc_iommu(dev, npages);
+	unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
 	int i;
 
 	if (iommu_page == -1) {
@@ -262,7 +264,11 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
 static dma_addr_t
 gart_map_simple(struct device *dev, phys_addr_t paddr, size_t size, int dir)
 {
-	dma_addr_t map = dma_map_area(dev, paddr, size, dir);
+	dma_addr_t map;
+	unsigned long align_mask;
+
+	align_mask = (1UL << get_order(size)) - 1;
+	map = dma_map_area(dev, paddr, size, dir, align_mask);
 
 	flush_gart();
 
@@ -281,7 +287,8 @@ gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
 	if (!need_iommu(dev, paddr, size))
 		return paddr;
 
-	bus = gart_map_simple(dev, paddr, size, dir);
+	bus = dma_map_area(dev, paddr, size, dir, 0);
+	flush_gart();
 
 	return bus;
 }
@@ -340,7 +347,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
 		unsigned long addr = sg_phys(s);
 
 		if (nonforced_iommu(dev, addr, s->length)) {
-			addr = dma_map_area(dev, addr, s->length, dir);
+			addr = dma_map_area(dev, addr, s->length, dir, 0);
 			if (addr == bad_dma_address) {
 				if (i > 0)
 					gart_unmap_sg(dev, sg, i, dir);
@@ -362,7 +369,7 @@ static int __dma_map_cont(struct device *dev, struct scatterlist *start,
 			  int nelems, struct scatterlist *sout,
 			  unsigned long pages)
 {
-	unsigned long iommu_start = alloc_iommu(dev, pages);
+	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
 	unsigned long iommu_page = iommu_start;
 	struct scatterlist *s;
 	int i;
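
The new align_mask argument threads from gart_map_simple() through alloc_iommu() into iommu_area_alloc(), which then only returns bitmap offsets with (offset & align_mask) == 0. What follows is a simplified sketch of that alignment step, not the kernel's iommu_area_alloc() (which also enforces boundary_size and operates on a packed bitmap); find_aligned() and its byte-per-page free map are illustrative only:

#include <stdio.h>

/* First-fit scan over a byte-per-page free map that only considers
 * start offsets aligned to (align_mask + 1) pages. */
static long find_aligned(const unsigned char *bitmap, long nbits,
			 long start, int npages, unsigned long align_mask)
{
	long index;

	/* Round the starting point up to the required alignment, then
	 * step through the map in aligned strides. */
	for (index = (start + align_mask) & ~align_mask;
	     index + npages <= nbits;
	     index += align_mask + 1) {
		int i, ok = 1;

		for (i = 0; i < npages; i++)
			if (bitmap[index + i])
				ok = 0;
		if (ok)
			return index;	/* aligned run of free pages */
	}
	return -1;
}

int main(void)
{
	unsigned char bitmap[64] = { 0 };

	bitmap[0] = 1;	/* page 0 already allocated */
	/* Five pages, aligned to 8 pages: index 0 is busy, so the
	 * scan jumps straight to index 8. */
	printf("offset=%ld\n", find_aligned(bitmap, 64, 0, 5, 0x7));
	return 0;
}

With align_mask = 0 the stride is one page and the scan degenerates to plain first-fit, which is why gart_map_single() and the scatter-gather paths in the diff pass 0: only the dma_alloc_coherent path served by gart_map_simple() needs the size alignment.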