Commit c8339002, authored Aug 28, 2004 by Linus Torvalds
Merge bk://linux-voyager.bkbits.net/dma-declare-coherent-memory-2.6
into ppc970.osdl.org:/home/torvalds/v2.6/linux

Parents: 2ac5f852 75e1802f
Showing 9 changed files with 357 additions and 8 deletions.
Documentation/DMA-API.txt         +79   -0
arch/i386/kernel/pci-dma.c       +109   -2
drivers/scsi/NCR_Q720.c           +18   -3
include/asm-i386/dma-mapping.h    +12   -0
include/linux/bitmap.h             +3   -0
include/linux/device.h             +3   -0
include/linux/dma-mapping.h       +29   -0
lib/bitmap.c                      +82   -0
mm/vmalloc.c                      +22   -3
Documentation/DMA-API.txt
@@ -444,4 +444,83 @@ dma_alloc_noncoherent(), starting at virtual address vaddr and
 continuing on for size. Again, you *must* observe the cache line
 boundaries when doing this.
 
+int
+dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+			dma_addr_t device_addr, size_t size, int
+			flags)
+
+Declare region of memory to be handed out by dma_alloc_coherent when
+it's asked for coherent memory for this device.
+
+bus_addr is the physical address to which the memory is currently
+assigned in the bus responding region (this will be used by the
+platform to perform the mapping)
+
+device_addr is the physical address the device needs to be programmed
+with actually to address this memory (this will be handed out as the
+dma_addr_t in dma_alloc_coherent())
+
+size is the size of the area (must be multiples of PAGE_SIZE).
+
+flags can be or'd together and are
+
+DMA_MEMORY_MAP - request that the memory returned from
+dma_alloc_coherent() be directly writeable.
+
+DMA_MEMORY_IO - request that the memory returned from
+dma_alloc_coherent() be addressable using read/write/memcpy_toio etc.
+
+One or both of these flags must be present
+
+DMA_MEMORY_INCLUDES_CHILDREN - make the declared memory be allocated by
+dma_alloc_coherent of any child devices of this one (for memory residing
+on a bridge).
+
+DMA_MEMORY_EXCLUSIVE - only allocate memory from the declared regions.
+Do not allow dma_alloc_coherent() to fall back to system memory when
+it's out of memory in the declared region.
+
+The return value will be either DMA_MEMORY_MAP or DMA_MEMORY_IO and
+must correspond to a passed in flag (i.e. no returning DMA_MEMORY_IO
+if only DMA_MEMORY_MAP were passed in) for success or zero for
+failure.
+
+Note, for DMA_MEMORY_IO returns, all subsequent memory returned by
+dma_alloc_coherent() may no longer be accessed directly, but instead
+must be accessed using the correct bus functions.  If your driver
+isn't prepared to handle this contingency, it should not specify
+DMA_MEMORY_IO in the input flags.
+
+As a simplification for the platforms, only *one* such region of
+memory may be declared per device.
+
+For reasons of efficiency, most platforms choose to track the declared
+region only at the granularity of a page.  For smaller allocations,
+you should use the dma_pool() API.
+
+void
+dma_release_declared_memory(struct device *dev)
+
+Remove the memory region previously declared from the system.  This
+API performs *no* in-use checking for this region and will return
+unconditionally having removed all the required structures.  It is the
+drivers job to ensure that no parts of this memory region are
+currently in use.
+
+void *
+dma_mark_declared_memory_occupied(struct device *dev,
+				dma_addr_t device_addr, size_t size)
+
+This is used to occupy specific regions of the declared space
+(dma_alloc_coherent() will hand out the first free region it finds).
+
+device_addr is the *device* address of the region requested
+
+size is the size (and should be a page sized multiple).
+
+The return value will be either a pointer to the processor virtual
+address of the memory, or an error (via PTR_ERR()) if any part of the
+region is occupied.
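
The declare/occupy/alloc flow documented above can be illustrated with a short, hypothetical driver fragment (not part of this commit). EXAMPLE_SRAM_PHYS and EXAMPLE_SRAM_SIZE stand in for a real card's on-board memory window, and the bus and device addresses are assumed to be identical, as they are in the NCR_Q720 conversion below:

	/* Hypothetical sketch only: exercises the API exactly as documented
	 * above.  EXAMPLE_SRAM_PHYS/EXAMPLE_SRAM_SIZE are placeholders. */
	static int example_setup_coherent(struct device *dev)
	{
		void *regs, *buf;
		dma_addr_t handle;

		/* hand the card's local memory window to the coherent allocator */
		if (dma_declare_coherent_memory(dev, EXAMPLE_SRAM_PHYS,
						EXAMPLE_SRAM_PHYS, EXAMPLE_SRAM_SIZE,
						DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE)
		    != DMA_MEMORY_MAP)
			return -ENOMEM;

		/* the first page is a register window; keep the allocator off it */
		regs = dma_mark_declared_memory_occupied(dev, EXAMPLE_SRAM_PHYS,
							 PAGE_SIZE);
		if (IS_ERR(regs)) {
			dma_release_declared_memory(dev);
			return PTR_ERR(regs);
		}

		/* subsequent coherent allocations come out of the declared region */
		buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
		if (!buf) {
			dma_release_declared_memory(dev);
			return -ENOMEM;
		}

		/* ... use buf/handle; on teardown dma_free_coherent() the buffer,
		 * then dma_release_declared_memory(dev) ... */
		return 0;
	}

Because DMA_MEMORY_EXCLUSIVE is passed, a NULL return from dma_alloc_coherent() here means the declared region is full rather than that system memory is exhausted.
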
arch/i386/kernel/pci-dma.c
@@ -13,17 +13,40 @@
 #include <linux/pci.h>
 #include <asm/io.h>
 
+struct dma_coherent_mem {
+	void		*virt_base;
+	u32		device_base;
+	int		size;
+	int		flags;
+	unsigned long	*bitmap;
+};
+
 void *dma_alloc_coherent(struct device *dev, size_t size,
 			   dma_addr_t *dma_handle, int gfp)
 {
 	void *ret;
+	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+	int order = get_order(size);
 	/* ignore region specifiers */
 	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
 
+	if (mem) {
+		int page = bitmap_find_free_region(mem->bitmap, mem->size,
+						     order);
+		if (page >= 0) {
+			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
+			ret = mem->virt_base + (page << PAGE_SHIFT);
+			memset(ret, 0, size);
+			return ret;
+		}
+		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
+			return NULL;
+	}
+
 	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
 		gfp |= GFP_DMA;
 
-	ret = (void *)__get_free_pages(gfp, get_order(size));
+	ret = (void *)__get_free_pages(gfp, order);
 
 	if (ret != NULL) {
 		memset(ret, 0, size);
@@ -35,5 +58,89 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 void dma_free_coherent(struct device *dev, size_t size,
 			 void *vaddr, dma_addr_t dma_handle)
 {
-	free_pages((unsigned long)vaddr, get_order(size));
+	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+	int order = get_order(size);
+
+	if (mem && vaddr >= mem->virt_base &&
+	    vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+		bitmap_release_region(mem->bitmap, page, order);
+	} else
+		free_pages((unsigned long)vaddr, order);
 }
+
+int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+				dma_addr_t device_addr, size_t size, int flags)
+{
+	void *mem_base;
+	int pages = size >> PAGE_SHIFT;
+	int bitmap_size = (pages + 31) / 32;
+
+	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
+		goto out;
+	if (!size)
+		goto out;
+	if (dev->dma_mem)
+		goto out;
+
+	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
+
+	mem_base = ioremap(bus_addr, size);
+	if (!mem_base)
+		goto out;
+
+	dev->dma_mem = kmalloc(GFP_KERNEL, sizeof(struct dma_coherent_mem));
+	if (!dev->dma_mem)
+		goto out;
+	memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
+	dev->dma_mem->bitmap = kmalloc(GFP_KERNEL, bitmap_size);
+	if (!dev->dma_mem->bitmap)
+		goto free1_out;
+	memset(dev->dma_mem->bitmap, 0, bitmap_size);
+
+	dev->dma_mem->virt_base = mem_base;
+	dev->dma_mem->device_base = device_addr;
+	dev->dma_mem->size = pages;
+	dev->dma_mem->flags = flags;
+
+	if (flags & DMA_MEMORY_MAP)
+		return DMA_MEMORY_MAP;
+
+	return DMA_MEMORY_IO;
+
+ free1_out:
+	kfree(dev->dma_mem->bitmap);
+ out:
+	return 0;
+}
+EXPORT_SYMBOL(dma_declare_coherent_memory);
+
+void dma_release_declared_memory(struct device *dev)
+{
+	struct dma_coherent_mem *mem = dev->dma_mem;
+
+	if (!mem)
+		return;
+	dev->dma_mem = NULL;
+	kfree(mem->bitmap);
+	kfree(mem);
+}
+EXPORT_SYMBOL(dma_release_declared_memory);
+
+void *dma_mark_declared_memory_occupied(struct device *dev,
+					dma_addr_t device_addr, size_t size)
+{
+	struct dma_coherent_mem *mem = dev->dma_mem;
+	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	int pos, err;
+
+	if (!mem)
+		return ERR_PTR(-EINVAL);
+
+	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
+	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
+	if (err != 0)
+		return ERR_PTR(err);
+	return mem->virt_base + (pos << PAGE_SHIFT);
+}
+EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
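
The bookkeeping above reduces to simple page-index arithmetic: a free index from the per-device bitmap is shifted by PAGE_SHIFT and added to both device_base and virt_base. A small stand-alone illustration (not part of the commit; PAGE_SHIFT and all values are made up for the demo):

	/* Stand-alone illustration of the handle/vaddr arithmetic used by the
	 * i386 dma_coherent_mem allocator above. */
	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT 12			/* assumed 4 KiB pages */

	int main(void)
	{
		uint32_t  device_base = 0x80000000u;	/* device_addr given at declare time */
		uintptr_t virt_base   = 0xc0100000u;	/* stands in for the ioremap() cookie */
		int page;

		/* as if bitmap_find_free_region() returned 0, 1, 2 for order-0 requests */
		for (page = 0; page < 3; page++)
			printf("page %d -> dma_handle 0x%08x, vaddr 0x%08lx\n",
			       page,
			       device_base + (page << PAGE_SHIFT),
			       (unsigned long)(virt_base + (page << PAGE_SHIFT)));
		return 0;
	}
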
drivers/scsi/NCR_Q720.c
@@ -216,7 +216,21 @@ NCR_Q720_probe(struct device *dev)
 		goto out_free;
 	}
 
-	mem_base = (__u32)ioremap(base_addr, mem_size);
+	if (dma_declare_coherent_memory(dev, base_addr, base_addr, mem_size,
+					DMA_MEMORY_MAP)
+	    != DMA_MEMORY_MAP) {
+		printk(KERN_ERR "NCR_Q720: DMA declare memory failed\n");
+		goto out_release_region;
+	}
+
+	/* The first 1k of the memory buffer is a memory map of the registers
+	 */
+	mem_base = (__u32)dma_mark_declared_memory_occupied(dev, base_addr,
+							    1024);
+	if (IS_ERR((void *)mem_base)) {
+		printk("NCR_Q720 failed to reserve memory mapped region\n");
+		goto out_release;
+	}
 
 	/* now also enable accesses in asr 2 */
 	asr2 = inb(io_base + 0x0a);
@@ -296,7 +310,8 @@ NCR_Q720_probe(struct device *dev)
 	return 0;
 
 out_release:
-	iounmap((void *)mem_base);
+	dma_release_declared_memory(dev);
+out_release_region:
 	release_mem_region(base_addr, mem_size);
 out_free:
 	kfree(p);
@@ -321,7 +336,7 @@ NCR_Q720_remove(struct device *dev)
 		if (p->hosts[i])
 			NCR_Q720_remove_one(p->hosts[i]);
 
-	iounmap((void *)p->mem_base);
+	dma_release_declared_memory(dev);
 	release_mem_region(p->phys_mem_base, p->mem_size);
 	free_irq(p->irq, p);
 	kfree(p);
include/asm-i386/dma-mapping.h
@@ -163,4 +163,16 @@ dma_cache_sync(void *vaddr, size_t size,
 	flush_write_buffers();
 }
 
+#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+extern int
+dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+			    dma_addr_t device_addr, size_t size, int flags);
+
+extern void
+dma_release_declared_memory(struct device *dev);
+
+extern void *
+dma_mark_declared_memory_occupied(struct device *dev,
+				  dma_addr_t device_addr, size_t size);
+
 #endif
include/linux/bitmap.h
@@ -98,6 +98,9 @@ extern int bitmap_scnprintf(char *buf, unsigned int len,
 			const unsigned long *src, int nbits);
 extern int bitmap_parse(const char __user *ubuf, unsigned int ulen,
 			unsigned long *dst, int nbits);
+extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
+extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
+extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
 
 #define BITMAP_LAST_WORD_MASK(nbits)					\
 (									\
include/linux/device.h
@@ -284,6 +284,9 @@ struct device {
 	struct list_head	dma_pools;	/* dma pools (if dma'ble) */
 
+	struct dma_coherent_mem	*dma_mem; /* internal for coherent mem
+					     override */
+
 	void	(*release)(struct device *dev);
 };
include/linux/dma-mapping.h
 #ifndef _ASM_LINUX_DMA_MAPPING_H
 #define _ASM_LINUX_DMA_MAPPING_H
 
+#include <linux/err.h>
+
 /* These definitions mirror those in pci.h, so they can be used
  * interchangeably with their PCI_ counterparts */
 enum dma_data_direction {
@@ -21,6 +23,33 @@ enum dma_data_direction {
 extern u64 dma_get_required_mask(struct device *dev);
 
+/* flags for the coherent memory api */
+#define DMA_MEMORY_MAP			0x01
+#define DMA_MEMORY_IO			0x02
+#define DMA_MEMORY_INCLUDES_CHILDREN	0x04
+#define DMA_MEMORY_EXCLUSIVE		0x08
+
+#ifndef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+static inline int
+dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+			    dma_addr_t device_addr, size_t size, int flags)
+{
+	return 0;
+}
+
+static inline void
+dma_release_declared_memory(struct device *dev)
+{
+}
+
+static inline void *
+dma_mark_declared_memory_occupied(struct device *dev,
+				  dma_addr_t device_addr, size_t size)
+{
+	return ERR_PTR(-EBUSY);
+}
+#endif
+
 #endif
lib/bitmap.c
@@ -408,3 +408,85 @@ int bitmap_parse(const char __user *ubuf, unsigned int ubuflen,
 	return 0;
 }
 EXPORT_SYMBOL(bitmap_parse);
+
+/**
+ *	bitmap_find_free_region - find a contiguous aligned mem region
+ *	@bitmap: an array of unsigned longs corresponding to the bitmap
+ *	@bits: number of bits in the bitmap
+ *	@order: region size to find (size is actually 1<<order)
+ *
+ * This is used to allocate a memory region from a bitmap.  The idea is
+ * that the region has to be 1<<order sized and 1<<order aligned (this
+ * makes the search algorithm much faster).
+ *
+ * The region is marked as set bits in the bitmap if a free one is
+ * found.
+ *
+ * Returns either beginning of region or negative error
+ */
+int bitmap_find_free_region(unsigned long *bitmap, int bits, int order)
+{
+	unsigned long mask;
+	int pages = 1 << order;
+	int i;
+
+	if (pages > BITS_PER_LONG)
+		return -EINVAL;
+
+	/* make a mask of the order */
+	mask = (1ul << (pages - 1));
+	mask += mask - 1;
+
+	/* run up the bitmap pages bits at a time */
+	for (i = 0; i < bits; i += pages) {
+		int index = i / BITS_PER_LONG;
+		int offset = i - (index * BITS_PER_LONG);
+		if ((bitmap[index] & (mask << offset)) == 0) {
+			/* set region in bimap */
+			bitmap[index] |= (mask << offset);
+			return i;
+		}
+	}
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(bitmap_find_free_region);
+
+/**
+ *	bitmap_release_region - release allocated bitmap region
+ *	@bitmap: a pointer to the bitmap
+ *	@pos: the beginning of the region
+ *	@order: the order of the bits to release (number is 1<<order)
+ *
+ * This is the complement to __bitmap_find_free_region and releases
+ * the found region (by clearing it in the bitmap).
+ */
+void bitmap_release_region(unsigned long *bitmap, int pos, int order)
+{
+	int pages = 1 << order;
+	unsigned long mask = (1ul << (pages - 1));
+	int index = pos / BITS_PER_LONG;
+	int offset = pos - (index * BITS_PER_LONG);
+	mask += mask - 1;
+	bitmap[index] &= ~(mask << offset);
+}
+EXPORT_SYMBOL(bitmap_release_region);
+
+int bitmap_allocate_region(unsigned long *bitmap, int pos, int order)
+{
+	int pages = 1 << order;
+	unsigned long mask = (1ul << (pages - 1));
+	int index = pos / BITS_PER_LONG;
+	int offset = pos - (index * BITS_PER_LONG);
+
+	/* We don't do regions of pages > BITS_PER_LONG.  The
+	 * algorithm would be a simple look for multiple zeros in the
+	 * array, but there's no driver today that needs this.  If you
+	 * trip this BUG(), you get to code it... */
+	BUG_ON(pages > BITS_PER_LONG);
+	mask += mask - 1;
+	if (bitmap[index] & (mask << offset))
+		return -EBUSY;
+	bitmap[index] |= (mask << offset);
+	return 0;
+}
+EXPORT_SYMBOL(bitmap_allocate_region);
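
The three helpers are easiest to read from the caller's side. A minimal, hypothetical kernel-style sketch (not part of the commit) of the 1<<order sized-and-aligned contract the comments above describe:

	/* Hypothetical sketch: drive the new region allocator over a 64-bit map.
	 * order 2 requests a naturally aligned run of 4 bits (4 "pages"). */
	static unsigned long example_map[64 / BITS_PER_LONG];

	static int example_use_region_api(void)
	{
		int pos;

		pos = bitmap_find_free_region(example_map, 64, 2);
		if (pos < 0)
			return pos;		/* -ENOMEM: no aligned run of 4 free bits */

		/* bits pos..pos+3 are now set, and pos is a multiple of 4 */

		bitmap_release_region(example_map, pos, 2);	/* clear them again */
		return 0;
	}

bitmap_allocate_region() is the non-searching variant: it claims the run starting at a caller-chosen pos, or returns -EBUSY if any bit in it is already set, which is what dma_mark_declared_memory_occupied() relies on.
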
mm/vmalloc.c
@@ -179,11 +179,26 @@ int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
 	return err;
 }
 
+#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
+
 struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 				unsigned long start, unsigned long end)
 {
 	struct vm_struct **p, *tmp, *area;
-	unsigned long addr = start;
+	unsigned long align = 1;
+	unsigned long addr;
+
+	if (flags & VM_IOREMAP) {
+		int bit = fls(size);
+
+		if (bit > IOREMAP_MAX_ORDER)
+			bit = IOREMAP_MAX_ORDER;
+		else if (bit < PAGE_SHIFT)
+			bit = PAGE_SHIFT;
+
+		align = 1ul << bit;
+	}
+	addr = ALIGN(start, align);
 
 	area = kmalloc(sizeof(*area), GFP_KERNEL);
 	if (unlikely(!area))
@@ -200,13 +215,17 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 	write_lock(&vmlist_lock);
 	for (p = &vmlist; (tmp = *p) != NULL ;p = &tmp->next) {
-		if ((unsigned long)tmp->addr < addr)
+		if ((unsigned long)tmp->addr < addr) {
+			if ((unsigned long)tmp->addr + tmp->size >= addr)
+				addr = ALIGN(tmp->size +
+					     (unsigned long)tmp->addr, align);
 			continue;
+		}
 		if ((size + addr) < addr)
 			goto out;
 		if (size + addr <= (unsigned long)tmp->addr)
 			goto found;
-		addr = tmp->size + (unsigned long)tmp->addr;
+		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
 		if (addr > end - size)
 			goto out;
 	}
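
For reference (not part of the commit), the alignment picked by the new VM_IOREMAP branch above can be checked with a small stand-alone program; PAGE_SHIFT = 12 is assumed and fls_demo() mimics the kernel's fls():

	#include <stdio.h>

	#define PAGE_SHIFT		12			/* assumed 4 KiB pages */
	#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages, as above */

	/* same result as the kernel's fls(): 1-based index of the highest set bit */
	static int fls_demo(unsigned long x)
	{
		int r = 0;

		while (x) {
			x >>= 1;
			r++;
		}
		return r;
	}

	int main(void)
	{
		unsigned long sizes[] = { 0x1000, 0x10000, 0x500000 };	/* 4 KiB, 64 KiB, 5 MiB */
		int i;

		for (i = 0; i < 3; i++) {
			int bit = fls_demo(sizes[i]);

			if (bit > IOREMAP_MAX_ORDER)
				bit = IOREMAP_MAX_ORDER;
			else if (bit < PAGE_SHIFT)
				bit = PAGE_SHIFT;

			/* prints: 4 KiB -> 8 KiB, 64 KiB -> 128 KiB, 5 MiB -> capped at 512 KiB */
			printf("ioremap of 0x%lx bytes aligned to 0x%lx\n",
			       sizes[i], 1ul << bit);
		}
		return 0;
	}
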