Commit c6baa196
Authored Dec 04, 2009 by Russell King
Merge branch 'pending-dma-coherent' into devel
Parents: 5cb2faa6, 26a26d32
Showing 6 changed files with 366 additions and 326 deletions (+366, -326)
arch/arm/include/asm/pgtable.h    +12   -2
arch/arm/include/asm/system.h     +12   -7
arch/arm/mm/Makefile               +1   -1
arch/arm/mm/dma-mapping.c        +181 -316
arch/arm/mm/vmregion.c           +131   -0
arch/arm/mm/vmregion.h            +29   -0
arch/arm/include/asm/pgtable.h
@@ -304,13 +304,23 @@ PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);
 static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 
+#define __pgprot_modify(prot,mask,bits)	\
+	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
+
 /*
  * Mark the prot value as uncacheable and unbufferable.
  */
 #define pgprot_noncached(prot) \
-	__pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_UNCACHED)
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
 #define pgprot_writecombine(prot) \
-	__pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_BUFFERABLE)
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
+#if __LINUX_ARM_ARCH__ >= 7
+#define pgprot_dmacoherent(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
+#else
+#define pgprot_dmacoherent(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
+#endif
 
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define pmd_present(pmd)	(pmd_val(pmd))
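For reference, a minimal sketch (not part of this commit) of how the new __pgprot_modify() helper composes a protection value. The wrapper function below is hypothetical; the macro and the L_PTE_* constants are the ones used in the hunk above.

/*
 * Hypothetical wrapper, for illustration only: clear the memory-type and
 * exec bits of 'prot' and mark it "bufferable", which is what
 * pgprot_dmacoherent() expands to on ARMv7 and later.
 */
static inline pgprot_t example_dma_prot(pgprot_t prot)
{
	return __pgprot_modify(prot, L_PTE_MT_MASK | L_PTE_EXEC, L_PTE_MT_BUFFERABLE);
}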
arch/arm/include/asm/system.h
@@ -138,21 +138,26 @@ extern unsigned int user_debug;
 #define dmb() __asm__ __volatile__ ("" : : : "memory")
 #endif
 
-#ifndef CONFIG_SMP
+#if __LINUX_ARM_ARCH__ >= 7 || defined(CONFIG_SMP)
+#define mb()		dmb()
+#define rmb()		dmb()
+#define wmb()		dmb()
+#else
 #define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
 #define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
 #define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+#endif
+
+#ifndef CONFIG_SMP
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
 #else
-#define mb()		dmb()
-#define rmb()		dmb()
-#define wmb()		dmb()
-#define smp_mb()	dmb()
-#define smp_rmb()	dmb()
-#define smp_wmb()	dmb()
+#define smp_mb()	mb()
+#define smp_rmb()	rmb()
+#define smp_wmb()	wmb()
 #endif
 
 #define read_barrier_depends()		do { } while(0)
 #define smp_read_barrier_depends()	do { } while(0)
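For context (not from this commit), a minimal producer/consumer sketch of why the barrier selection above matters: on ARMv7 or SMP kernels wmb()/rmb() now expand to dmb(), while on older uniprocessor systems they fall back to compiler barriers unless the platform is marked coherent. The variables and functions below are hypothetical.

static int example_data;
static int example_flag;

static void example_producer(void)
{
	example_data = 42;
	wmb();			/* order the data store before the flag store */
	example_flag = 1;
}

static int example_consumer(void)
{
	if (!example_flag)
		return -1;
	rmb();			/* order the flag load before the data load */
	return example_data;
}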
arch/arm/mm/Makefile
@@ -6,7 +6,7 @@ obj-y				:= dma-mapping.o extable.o fault.o init.o \
 				   iomap.o
 
 obj-$(CONFIG_MMU)		+= fault-armv.o flush.o ioremap.o mmap.o \
-				   pgd.o mmu.o
+				   pgd.o mmu.o vmregion.o
 
 ifneq ($(CONFIG_MMU),y)
 obj-y				+= nommu.o
arch/arm/mm/dma-mapping.c
@@ -63,194 +63,152 @@ static u64 get_coherent_dma_mask(struct device *dev)
 	return mask;
 }
 
-#ifdef CONFIG_MMU
 /*
- * These are the page tables (2MB each) covering uncached, DMA consistent allocations
+ * Allocate a DMA buffer for 'dev' of size 'size' using the
+ * specified gfp mask.  Note that 'size' must be page aligned.
  */
-static pte_t *consistent_pte[NUM_CONSISTENT_PTES];
-static DEFINE_SPINLOCK(consistent_lock);
+static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+{
+	unsigned long order = get_order(size);
+	struct page *page, *p, *e;
+	void *ptr;
+	u64 mask = get_coherent_dma_mask(dev);
 
-/*
- * VM region handling support.
- *
- * This should become something generic, handling VM region allocations for
- * vmalloc and similar (ioremap, module space, etc).
- *
- * I envisage vmalloc()'s supporting vm_struct becoming:
- *
- *  struct vm_struct {
- *    struct vm_region	region;
- *    unsigned long	flags;
- *    struct page	**pages;
- *    unsigned int	nr_pages;
- *    unsigned long	phys_addr;
- *  };
- *
- * get_vm_area() would then call vm_region_alloc with an appropriate
- * struct vm_region head (eg):
- *
- *  struct vm_region vmalloc_head = {
- *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
- *	.vm_start	= VMALLOC_START,
- *	.vm_end		= VMALLOC_END,
- *  };
- *
- * However, vmalloc_head.vm_start is variable (typically, it is dependent on
- * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
- * would have to initialise this each time prior to calling vm_region_alloc().
- */
-struct arm_vm_region {
-	struct list_head	vm_list;
-	unsigned long		vm_start;
-	unsigned long		vm_end;
-	struct page		*vm_pages;
-	int			vm_active;
-};
+#ifdef CONFIG_DMA_API_DEBUG
+	u64 limit = (mask + 1) & ~mask;
+	if (limit && size >= limit) {
+		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
+			size, mask);
+		return NULL;
+	}
+#endif
 
-static struct arm_vm_region consistent_head = {
-	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
-	.vm_start	= CONSISTENT_BASE,
-	.vm_end		= CONSISTENT_END,
-};
+	if (!mask)
+		return NULL;
 
-static struct arm_vm_region *
-arm_vm_region_alloc(struct arm_vm_region *head, size_t size, gfp_t gfp)
-{
-	unsigned long addr = head->vm_start, end = head->vm_end - size;
-	unsigned long flags;
-	struct arm_vm_region *c, *new;
-
-	new = kmalloc(sizeof(struct arm_vm_region), gfp);
-	if (!new)
-		goto out;
-
-	spin_lock_irqsave(&consistent_lock, flags);
-
-	list_for_each_entry(c, &head->vm_list, vm_list) {
-		if ((addr + size) < addr)
-			goto nospc;
-		if ((addr + size) <= c->vm_start)
-			goto found;
-		addr = c->vm_end;
-		if (addr > end)
-			goto nospc;
-	}
+	if (mask < 0xffffffffULL)
+		gfp |= GFP_DMA;
+
+	page = alloc_pages(gfp, order);
+	if (!page)
+		return NULL;
 
- found:
 	/*
-	 * Insert this entry _before_ the one we found.
+	 * Now split the huge page and free the excess pages
 	 */
-	list_add_tail(&new->vm_list, &c->vm_list);
-	new->vm_start = addr;
-	new->vm_end = addr + size;
-	new->vm_active = 1;
-
-	spin_unlock_irqrestore(&consistent_lock, flags);
-	return new;
-
- nospc:
-	spin_unlock_irqrestore(&consistent_lock, flags);
-	kfree(new);
- out:
-	return NULL;
+	split_page(page, order);
+	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
+		__free_page(p);
+
+	/*
+	 * Ensure that the allocated pages are zeroed, and that any data
+	 * lurking in the kernel direct-mapped region is invalidated.
+	 */
+	ptr = page_address(page);
+	memset(ptr, 0, size);
+	dmac_flush_range(ptr, ptr + size);
+	outer_flush_range(__pa(ptr), __pa(ptr) + size);
+
+	return page;
 }
 
-static struct arm_vm_region *arm_vm_region_find(struct arm_vm_region *head, unsigned long addr)
+/*
+ * Free a DMA buffer.  'size' must be page aligned.
+ */
+static void __dma_free_buffer(struct page *page, size_t size)
 {
-	struct arm_vm_region *c;
-
-	list_for_each_entry(c, &head->vm_list, vm_list) {
-		if (c->vm_active && c->vm_start == addr)
-			goto out;
+	struct page *e = page + (size >> PAGE_SHIFT);
+
+	while (page < e) {
+		__free_page(page);
+		page++;
 	}
-	c = NULL;
- out:
-	return c;
 }
 
+#ifdef CONFIG_MMU
+/*
+ * These are the page tables (2MB each) covering uncached, DMA consistent allocations
+ */
+static pte_t *consistent_pte[NUM_CONSISTENT_PTES];
+
+#include "vmregion.h"
+
+static struct arm_vmregion_head consistent_head = {
+	.vm_lock	= __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
+	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
+	.vm_start	= CONSISTENT_BASE,
+	.vm_end		= CONSISTENT_END,
+};
+
 #ifdef CONFIG_HUGETLB_PAGE
 #error ARM Coherent DMA allocator does not (yet) support huge TLB
 #endif
 
-static void *
-__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
-	    pgprot_t prot)
+/*
+ * Initialise the consistent memory allocation.
+ */
+static int __init consistent_init(void)
 {
-	struct page *page;
-	struct arm_vm_region *c;
-	unsigned long order;
-	u64 mask = get_coherent_dma_mask(dev);
-	u64 limit;
+	int ret = 0;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	int i = 0;
+	u32 base = CONSISTENT_BASE;
 
-	if (!consistent_pte[0]) {
-		printk(KERN_ERR "%s: not initialised\n", __func__);
-		dump_stack();
-		return NULL;
-	}
+	do {
+		pgd = pgd_offset(&init_mm, base);
+		pmd = pmd_alloc(&init_mm, pgd, base);
+		if (!pmd) {
+			printk(KERN_ERR "%s: no pmd tables\n", __func__);
+			ret = -ENOMEM;
+			break;
+		}
+		WARN_ON(!pmd_none(*pmd));
 
-	if (!mask)
-		goto no_page;
+		pte = pte_alloc_kernel(pmd, base);
+		if (!pte) {
+			printk(KERN_ERR "%s: no pte tables\n", __func__);
+			ret = -ENOMEM;
+			break;
+		}
 
-	/*
-	 * Sanity check the allocation size.
-	 */
-	size = PAGE_ALIGN(size);
-	limit = (mask + 1) & ~mask;
-	if ((limit && size >= limit) ||
-	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
-		printk(KERN_WARNING "coherent allocation too big "
-		       "(requested %#x mask %#llx)\n", size, mask);
-		goto no_page;
-	}
+		consistent_pte[i++] = pte;
+		base += (1 << PGDIR_SHIFT);
+	} while (base < CONSISTENT_END);
 
-	order = get_order(size);
-
-	if (mask < 0xffffffffULL)
-		gfp |= GFP_DMA;
+	return ret;
+}
 
-	page = alloc_pages(gfp, order);
-	if (!page)
-		goto no_page;
+core_initcall(consistent_init);
 
-	/*
-	 * Invalidate any data that might be lurking in the
-	 * kernel direct-mapped region for device DMA.
-	 */
-	{
-		void *ptr = page_address(page);
-		memset(ptr, 0, size);
-		dmac_flush_range(ptr, ptr + size);
-		outer_flush_range(__pa(ptr), __pa(ptr) + size);
-	}
+static void *
+__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
+{
+	struct arm_vmregion *c;
+
+	if (!consistent_pte[0]) {
+		printk(KERN_ERR "%s: not initialised\n", __func__);
+		dump_stack();
+		return NULL;
+	}
 
 	/*
 	 * Allocate a virtual address in the consistent mapping region.
 	 */
-	c = arm_vm_region_alloc(&consistent_head, size,
+	c = arm_vmregion_alloc(&consistent_head, size,
 			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
 	if (c) {
 		pte_t *pte;
-		struct page *end = page + (1 << order);
 		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
 		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
 
 		pte = consistent_pte[idx] + off;
 		c->vm_pages = page;
 
-		split_page(page, order);
-
-		/*
-		 * Set the "dma handle"
-		 */
-		*handle = page_to_dma(dev, page);
-
 		do {
 			BUG_ON(!pte_none(*pte));
 
-			/*
-			 * x86 does not mark the pages reserved...
-			 */
-			SetPageReserved(page);
 			set_pte_ext(pte, mk_pte(page, prot), 0);
 			page++;
 			pte++;
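As an aside, the __dma_alloc_buffer() path introduced above relies on the "allocate a power-of-two order, split it, free the tail" idiom. A minimal sketch of just that idiom follows (the function name is hypothetical, and the real code additionally zeroes and cache-flushes the buffer):

/* Illustrative only: 'size' is assumed to be page aligned. */
static struct page *example_alloc_exact(size_t size, gfp_t gfp)
{
	unsigned int order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/* Turn the single order-N allocation into 2^N order-0 pages... */
	split_page(page, order);

	/* ...and give back the pages beyond the requested size. */
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	return page;
}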
@@ -261,48 +219,90 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 			}
 		} while (size -= PAGE_SIZE);
 
-		/*
-		 * Free the otherwise unused pages.
-		 */
-		while (page < end) {
-			__free_page(page);
-			page++;
-		}
-
 		return (void *)c->vm_start;
 	}
 
-	if (page)
-		__free_pages(page, order);
- no_page:
-	*handle = ~0;
 	return NULL;
 }
 
+static void __dma_free_remap(void *cpu_addr, size_t size)
+{
+	struct arm_vmregion *c;
+	unsigned long addr;
+	pte_t *ptep;
+	int idx;
+	u32 off;
+
+	c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
+	if (!c) {
+		printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
+		       __func__, cpu_addr);
+		dump_stack();
+		return;
+	}
+
+	if ((c->vm_end - c->vm_start) != size) {
+		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
+		       __func__, c->vm_end - c->vm_start, size);
+		dump_stack();
+		size = c->vm_end - c->vm_start;
+	}
+
+	idx = CONSISTENT_PTE_INDEX(c->vm_start);
+	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
+	ptep = consistent_pte[idx] + off;
+	addr = c->vm_start;
+	do {
+		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
+
+		ptep++;
+		addr += PAGE_SIZE;
+		off++;
+		if (off >= PTRS_PER_PTE) {
+			off = 0;
+			ptep = consistent_pte[++idx];
+		}
+
+		if (pte_none(pte) || !pte_present(pte))
+			printk(KERN_CRIT "%s: bad page in kernel page table\n",
+			       __func__);
+	} while (size -= PAGE_SIZE);
+
+	flush_tlb_kernel_range(c->vm_start, c->vm_end);
+
+	arm_vmregion_free(&consistent_head, c);
+}
+
 #else	/* !CONFIG_MMU */
+
+#define __dma_alloc_remap(page, size, gfp, prot)	page_address(page)
+#define __dma_free_remap(addr, size)			do { } while (0)
+
+#endif	/* CONFIG_MMU */
+
 static void *
 __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	    pgprot_t prot)
 {
-	void *virt;
-	u64 mask = get_coherent_dma_mask(dev);
+	struct page *page;
+	void *addr;
 
-	if (!mask)
-		goto error;
+	*handle = ~0;
+	size = PAGE_ALIGN(size);
 
-	if (mask < 0xffffffffULL)
-		gfp |= GFP_DMA;
-	virt = kmalloc(size, gfp);
-	if (!virt)
-		goto error;
+	page = __dma_alloc_buffer(dev, size, gfp);
+	if (!page)
+		return NULL;
 
-	*handle = virt_to_dma(dev, virt);
-	return virt;
+	if (!arch_is_coherent())
+		addr = __dma_alloc_remap(page, size, gfp, prot);
+	else
+		addr = page_address(page);
 
-error:
-	*handle = ~0;
-	return NULL;
+	if (addr)
+		*handle = page_to_dma(dev, page);
+
+	return addr;
 }
-#endif	/* CONFIG_MMU */
@@ -316,19 +316,8 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp
 	if (dma_alloc_from_coherent(dev, size, handle, &memory))
 		return memory;
 
-	if (arch_is_coherent()) {
-		void *virt;
-
-		virt = kmalloc(size, gfp);
-		if (!virt)
-			return NULL;
-		*handle =  virt_to_dma(dev, virt);
-
-		return virt;
-	}
-
 	return __dma_alloc(dev, size, handle, gfp,
-			   pgprot_noncached(pgprot_kernel));
+			   pgprot_dmacoherent(pgprot_kernel));
 }
 EXPORT_SYMBOL(dma_alloc_coherent);
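From a driver's point of view the interface reworked here is unchanged; a usage sketch for orientation (all names below are hypothetical and not part of this commit):

#include <linux/dma-mapping.h>

static void *example_cpu_buf;
static dma_addr_t example_dma_handle;

static int example_setup(struct device *dev)
{
	/* CPU-visible, cache-coherent buffer plus the bus address for the device */
	example_cpu_buf = dma_alloc_coherent(dev, PAGE_SIZE,
					     &example_dma_handle, GFP_KERNEL);
	if (!example_cpu_buf)
		return -ENOMEM;
	return 0;
}

static void example_teardown(struct device *dev)
{
	dma_free_coherent(dev, PAGE_SIZE, example_cpu_buf, example_dma_handle);
}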
@@ -349,15 +338,12 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
 {
 	int ret = -ENXIO;
 #ifdef CONFIG_MMU
-	unsigned long flags, user_size, kern_size;
-	struct arm_vm_region *c;
+	unsigned long user_size, kern_size;
+	struct arm_vmregion *c;
 
 	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 
-	spin_lock_irqsave(&consistent_lock, flags);
-	c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr);
-	spin_unlock_irqrestore(&consistent_lock, flags);
-
+	c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
 	if (c) {
 		unsigned long off = vma->vm_pgoff;
@@ -379,7 +365,7 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
 int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
 		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
 {
-	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
 	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
 }
 EXPORT_SYMBOL(dma_mmap_coherent);
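Similarly, a hypothetical driver exposing that buffer to userspace would go through dma_mmap_coherent(), which now applies pgprot_dmacoherent() to the VMA. A sketch only, reusing the names from the allocation sketch above:

static int example_mmap(struct device *dev, struct vm_area_struct *vma)
{
	/* Map the coherent buffer allocated in example_setup() into userspace */
	return dma_mmap_coherent(dev, vma, example_cpu_buf, example_dma_handle,
				 vma->vm_end - vma->vm_start);
}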
@@ -396,143 +382,22 @@ EXPORT_SYMBOL(dma_mmap_writecombine);
  * free a page as defined by the above mapping.
  * Must not be called with IRQs disabled.
  */
-#ifdef CONFIG_MMU
 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
 {
-	struct arm_vm_region *c;
-	unsigned long flags, addr;
-	pte_t *ptep;
-	int idx;
-	u32 off;
-
 	WARN_ON(irqs_disabled());
 
 	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
 		return;
 
-	if (arch_is_coherent()) {
-		kfree(cpu_addr);
-		return;
-	}
-
 	size = PAGE_ALIGN(size);
 
-	spin_lock_irqsave(&consistent_lock, flags);
-	c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr);
-	if (!c)
-		goto no_area;
-
-	c->vm_active = 0;
-	spin_unlock_irqrestore(&consistent_lock, flags);
-
-	if ((c->vm_end - c->vm_start) != size) {
-		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
-		       __func__, c->vm_end - c->vm_start, size);
-		dump_stack();
-		size = c->vm_end - c->vm_start;
-	}
-
-	idx = CONSISTENT_PTE_INDEX(c->vm_start);
-	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
-	ptep = consistent_pte[idx] + off;
-	addr = c->vm_start;
-	do {
-		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
-		unsigned long pfn;
-
-		ptep++;
-		addr += PAGE_SIZE;
-		off++;
-		if (off >= PTRS_PER_PTE) {
-			off = 0;
-			ptep = consistent_pte[++idx];
-		}
-
-		if (!pte_none(pte) && pte_present(pte)) {
-			pfn = pte_pfn(pte);
-
-			if (pfn_valid(pfn)) {
-				struct page *page = pfn_to_page(pfn);
-
-				/*
-				 * x86 does not mark the pages reserved...
-				 */
-				ClearPageReserved(page);
-
-				__free_page(page);
-				continue;
-			}
-		}
-
-		printk(KERN_CRIT "%s: bad page in kernel page table\n",
-		       __func__);
-	} while (size -= PAGE_SIZE);
-
-	flush_tlb_kernel_range(c->vm_start, c->vm_end);
-
-	spin_lock_irqsave(&consistent_lock, flags);
-	list_del(&c->vm_list);
-	spin_unlock_irqrestore(&consistent_lock, flags);
-
-	kfree(c);
-	return;
+	if (!arch_is_coherent())
+		__dma_free_remap(cpu_addr, size);
 
- no_area:
-	spin_unlock_irqrestore(&consistent_lock, flags);
-	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
-	       __func__, cpu_addr);
-	dump_stack();
-}
-#else	/* !CONFIG_MMU */
-void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
-{
-	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
-		return;
-	kfree(cpu_addr);
+	__dma_free_buffer(dma_to_page(dev, handle), size);
 }
-#endif	/* CONFIG_MMU */
 EXPORT_SYMBOL(dma_free_coherent);
 
-/*
- * Initialise the consistent memory allocation.
- */
-static int __init consistent_init(void)
-{
-	int ret = 0;
-#ifdef CONFIG_MMU
-	pgd_t *pgd;
-	pmd_t *pmd;
-	pte_t *pte;
-	int i = 0;
-	u32 base = CONSISTENT_BASE;
-
-	do {
-		pgd = pgd_offset(&init_mm, base);
-		pmd = pmd_alloc(&init_mm, pgd, base);
-		if (!pmd) {
-			printk(KERN_ERR "%s: no pmd tables\n", __func__);
-			ret = -ENOMEM;
-			break;
-		}
-		WARN_ON(!pmd_none(*pmd));
-
-		pte = pte_alloc_kernel(pmd, base);
-		if (!pte) {
-			printk(KERN_ERR "%s: no pte tables\n", __func__);
-			ret = -ENOMEM;
-			break;
-		}
-
-		consistent_pte[i++] = pte;
-		base += (1 << PGDIR_SHIFT);
-	} while (base < CONSISTENT_END);
-#endif	/* !CONFIG_MMU */
-
-	return ret;
-}
-
-core_initcall(consistent_init);
-
 /*
  * Make an area consistent for devices.
  * Note: Drivers should NOT use this function directly, as it will break
arch/arm/mm/vmregion.c (new file, mode 100644)
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>
#include "vmregion.h"
/*
* VM region handling support.
*
* This should become something generic, handling VM region allocations for
* vmalloc and similar (ioremap, module space, etc).
*
* I envisage vmalloc()'s supporting vm_struct becoming:
*
* struct vm_struct {
* struct vmregion region;
* unsigned long flags;
* struct page **pages;
* unsigned int nr_pages;
* unsigned long phys_addr;
* };
*
* get_vm_area() would then call vmregion_alloc with an appropriate
* struct vmregion head (eg):
*
* struct vmregion vmalloc_head = {
* .vm_list = LIST_HEAD_INIT(vmalloc_head.vm_list),
* .vm_start = VMALLOC_START,
* .vm_end = VMALLOC_END,
* };
*
* However, vmalloc_head.vm_start is variable (typically, it is dependent on
* the amount of RAM found at boot time.) I would imagine that get_vm_area()
* would have to initialise this each time prior to calling vmregion_alloc().
*/
struct arm_vmregion *
arm_vmregion_alloc(struct arm_vmregion_head *head, size_t size, gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct arm_vmregion *c, *new;

	if (head->vm_end - head->vm_start < size) {
		printk(KERN_WARNING "%s: allocation too big (requested %#x)\n",
			__func__, size);
		goto out;
	}

	new = kmalloc(sizeof(struct arm_vmregion), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&head->vm_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;
	new->vm_active = 1;

	spin_unlock_irqrestore(&head->vm_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&head->vm_lock, flags);
	kfree(new);
 out:
	return NULL;
}

static struct arm_vmregion *__arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
{
	struct arm_vmregion *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_active && c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}

struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
{
	struct arm_vmregion *c;
	unsigned long flags;

	spin_lock_irqsave(&head->vm_lock, flags);
	c = __arm_vmregion_find(head, addr);
	spin_unlock_irqrestore(&head->vm_lock, flags);
	return c;
}

struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *head, unsigned long addr)
{
	struct arm_vmregion *c;
	unsigned long flags;

	spin_lock_irqsave(&head->vm_lock, flags);
	c = __arm_vmregion_find(head, addr);
	if (c)
		c->vm_active = 0;
	spin_unlock_irqrestore(&head->vm_lock, flags);
	return c;
}

void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c)
{
	unsigned long flags;

	spin_lock_irqsave(&head->vm_lock, flags);
	list_del(&c->vm_list);
	spin_unlock_irqrestore(&head->vm_lock, flags);

	kfree(c);
}
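For orientation, a minimal sketch of how a client is expected to drive this allocator (the head and address range below are hypothetical; dma-mapping.c's consistent_head is the real in-tree user):

static struct arm_vmregion_head example_head = {
	.vm_lock  = __SPIN_LOCK_UNLOCKED(&example_head.vm_lock),
	.vm_list  = LIST_HEAD_INIT(example_head.vm_list),
	.vm_start = 0xff000000UL,		/* hypothetical 1MB window */
	.vm_end   = 0xff100000UL,
};

static void example_use(void)
{
	struct arm_vmregion *c;

	/* Carve a 4-page region out of the window */
	c = arm_vmregion_alloc(&example_head, 4 * PAGE_SIZE, GFP_KERNEL);
	if (!c)
		return;

	/* ... map something into [c->vm_start, c->vm_end) ... */

	arm_vmregion_free(&example_head, c);	/* unlink from the list and kfree */
}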
arch/arm/mm/vmregion.h (new file, mode 100644)
#ifndef VMREGION_H
#define VMREGION_H
#include <linux/spinlock.h>
#include <linux/list.h>
struct page;

struct arm_vmregion_head {
	spinlock_t		vm_lock;
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
};

struct arm_vmregion {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
	struct page		*vm_pages;
	int			vm_active;
};

struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, gfp_t);
struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long);
struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long);
void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *);

#endif