Commit 843b2fd9 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] sh: DMA Mapping API

From: Paul Mundt <lethal@linux-sh.org>

This implements the DMA mapping API for sh, as well as cleaning up some
sh-specific DMA drivers.
parent f1cf1dc6
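
For orientation, the sketch below shows how a hypothetical sh driver would use the interfaces this patch provides: dma_alloc_coherent(), backed by the new consistent_alloc() in arch/sh/mm/consistent.c, and the streaming dma_map_single()/dma_unmap_single() pair, which write back and invalidate the CPU cache on noncoherent configurations. The device pointer, buffer sizes, and example_* names are illustrative only and not part of the patch.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#define EXAMPLE_RING_BYTES 4096   /* illustrative sizes only */
#define EXAMPLE_BUF_BYTES  1024

static int example_setup_dma(struct device *dev)
{
	void *ring, *buf;
	dma_addr_t ring_dma, buf_dma;

	/* Coherent allocation; on sh this ends up in consistent_alloc(). */
	ring = dma_alloc_coherent(dev, EXAMPLE_RING_BYTES, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	buf = kmalloc(EXAMPLE_BUF_BYTES, GFP_KERNEL);
	if (!buf) {
		dma_free_coherent(dev, EXAMPLE_RING_BYTES, ring, ring_dma);
		return -ENOMEM;
	}

	/* Streaming mapping: writes back/invalidates the cache before the
	 * device sees the buffer (skipped for coherent PCI configurations). */
	buf_dma = dma_map_single(dev, buf, EXAMPLE_BUF_BYTES, DMA_TO_DEVICE);

	/* ... hand ring_dma and buf_dma to the device, run the transfer ... */

	dma_unmap_single(dev, buf_dma, EXAMPLE_BUF_BYTES, DMA_TO_DEVICE);
	kfree(buf);
	dma_free_coherent(dev, EXAMPLE_RING_BYTES, ring, ring_dma);
	return 0;
}
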
menu "DMA support"
config SH_DMA config SH_DMA
bool "DMA controller (DMAC) support" bool "DMA controller (DMAC) support"
help help
...@@ -34,9 +36,20 @@ config NR_DMA_CHANNELS ...@@ -34,9 +36,20 @@ config NR_DMA_CHANNELS
config DMA_PAGE_OPS config DMA_PAGE_OPS
bool "Use DMAC for page copy/clear" bool "Use DMAC for page copy/clear"
depends on SH_DMA depends on SH_DMA && BROKEN
help help
Selecting this option will use a dual-address mode configured channel Selecting this option will use a dual-address mode configured channel
in the SH DMAC for copy_page()/clear_page(). Primarily a performance in the SH DMAC for copy_page()/clear_page(). Primarily a performance
hack. hack.
config DMA_PAGE_OPS_CHANNEL
depends on DMA_PAGE_OPS
int "DMA channel for sh memory-manager page copy/clear"
default "3"
help
This allows the specification of the dual address dma channel,
in case channel 3 is unavailable. On the SH4, channels 1,2, and 3
are dual-address capable.
endmenu
@@ -104,6 +104,11 @@ void dma_wait_for_completion(unsigned int chan)
{
struct dma_info *info = get_dma_info(chan);
+if (info->tei_capable) {
+wait_event(info->wait_queue, (info->ops->get_residue(info) == 0));
+return;
+}
while (info->ops->get_residue(info))
cpu_relax();
}
@@ -161,6 +166,7 @@ int __init register_dmac(struct dma_ops *ops)
info->chan = i;
init_MUTEX(&info->sem);
+init_waitqueue_head(&info->wait_queue);
}
return 0;
...
@@ -55,9 +55,9 @@ struct sh_dmac_channel {
} __attribute__ ((aligned(16)));
struct sh_dmac_info {
-struct sh_dmac_channel channel[MAX_DMAC_CHANNELS];
+struct sh_dmac_channel channel[4];
unsigned long dmaor;
-} __attribute__ ((packed));
+};
static volatile struct sh_dmac_info *sh_dmac = (volatile struct sh_dmac_info *)SH_DMAC_BASE;
@@ -74,25 +74,12 @@ static inline unsigned int get_dmte_irq(unsigned int chan)
if (chan < 4) {
irq = DMTE0_IRQ + chan;
} else {
-irq = DMTE4_IRQ + chan;
+irq = DMTE4_IRQ + chan - 4;
}
return irq;
}
-static inline int get_dmte_chan(unsigned int irq)
-{
-int chan;
-if ((irq - DMTE4_IRQ) < 0) {
-chan = irq - DMTE0_IRQ;
-} else {
-chan = irq - DMTE4_IRQ + 4;
-}
-return chan;
-}
/*
* We determine the correct shift size based off of the CHCR transmit size
* for the given channel. Since we know that it will take:
@@ -106,54 +93,42 @@ static inline unsigned int calc_xmit_shift(struct dma_info *info)
return ts_shift[(sh_dmac->channel[info->chan].chcr >> 4) & 0x0007];
}
+/*
+* The transfer end interrupt must read the chcr register to end the
+* hardware interrupt active condition.
+* Besides that it needs to waken any waiting process, which should handle
+* setting up the next transfer.
+*/
static irqreturn_t dma_tei(int irq, void *dev_id, struct pt_regs *regs)
{
-int chan = get_dmte_chan(irq);
-struct dma_info *info = get_dma_info(chan);
-if (info->sar)
-sh_dmac->channel[info->chan].sar = info->sar;
-if (info->dar)
-sh_dmac->channel[info->chan].sar = info->dar;
-sh_dmac->channel[info->chan].dmatcr = info->count >> calc_xmit_shift(info);
-sh_dmac->channel[info->chan].chcr &= ~CHCR_TE;
-disable_irq(irq);
+struct dma_info * info = (struct dma_info *)dev_id;
+u32 chcr = sh_dmac->channel[info->chan].chcr;
+if (!(chcr & CHCR_TE))
+return IRQ_NONE;
+sh_dmac->channel[info->chan].chcr = chcr & ~(CHCR_IE | CHCR_DE);
+wake_up(&info->wait_queue);
return IRQ_HANDLED;
}
-static struct irqaction irq_tei = {
-.name = "DMAC Transfer End",
-.handler = dma_tei,
-.flags = SA_INTERRUPT,
-};
static int sh_dmac_request_dma(struct dma_info *info)
{
-int irq = get_dmte_irq(info->chan);
-char *p = (char *)((&irq_tei)->name);
-sprintf(p, "%s (Channel %d)", p, info->chan);
-make_ipr_irq(irq, DMA_IPR_ADDR, DMA_IPR_POS, DMA_PRIORITY);
-return setup_irq(irq, &irq_tei);
+return request_irq(get_dmte_irq(info->chan), dma_tei,
+SA_INTERRUPT, "DMAC Transfer End", info);
}
static void sh_dmac_free_dma(struct dma_info *info)
{
-free_irq(get_dmte_irq(info->chan), 0);
+free_irq(get_dmte_irq(info->chan), info);
}
static void sh_dmac_configure_channel(struct dma_info *info, unsigned long chcr)
{
-if (!chcr) {
-chcr = sh_dmac->channel[info->chan].chcr;
-chcr |= /* CHCR_IE | */ RS_DUAL;
-}
+if (!chcr)
+chcr = RS_DUAL;
sh_dmac->channel[info->chan].chcr = chcr;
@@ -162,12 +137,18 @@ static void sh_dmac_configure_channel(struct dma_info *info, unsigned long chcr)
static void sh_dmac_enable_dma(struct dma_info *info)
{
-sh_dmac->channel[info->chan].chcr |= CHCR_DE;
+int irq = get_dmte_irq(info->chan);
+sh_dmac->channel[info->chan].chcr |= (CHCR_DE | CHCR_IE);
+enable_irq(irq);
}
static void sh_dmac_disable_dma(struct dma_info *info)
{
-sh_dmac->channel[info->chan].chcr &= ~(CHCR_DE | CHCR_TE);
+int irq = get_dmte_irq(info->chan);
+disable_irq(irq);
+sh_dmac->channel[info->chan].chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
}
static int sh_dmac_xfer_dma(struct dma_info *info)
@@ -191,10 +172,14 @@ static int sh_dmac_xfer_dma(struct dma_info *info)
* In this case, only one address can be defined, anything else will
* result in a DMA address error interrupt (at least on the SH-4),
* which will subsequently halt the transfer.
+*
+* Channel 2 on the Dreamcast is a special case, as this is used for
+* cascading to the PVR2 DMAC. In this case, we still need to write
+* SAR and DAR, regardless of value, in order for cascading to work.
*/
-if (info->sar)
+if (info->sar || (mach_is_dreamcast() && info->chan == 2))
sh_dmac->channel[info->chan].sar = info->sar;
-if (info->dar)
+if (info->dar || (mach_is_dreamcast() && info->chan == 2))
sh_dmac->channel[info->chan].dar = info->dar;
sh_dmac->channel[info->chan].dmatcr = info->count >> calc_xmit_shift(info);
@@ -206,6 +191,9 @@ static int sh_dmac_xfer_dma(struct dma_info *info)
static int sh_dmac_get_dma_residue(struct dma_info *info)
{
+if (!(sh_dmac->channel[info->chan].chcr & CHCR_DE))
+return 0;
return sh_dmac->channel[info->chan].dmatcr << calc_xmit_shift(info);
}
@@ -221,12 +209,6 @@ static irqreturn_t dma_err(int irq, void *dev_id, struct pt_regs *regs)
return IRQ_HANDLED;
}
-static struct irqaction irq_err = {
-.name = "DMAC Address Error",
-.handler = dma_err,
-.flags = SA_INTERRUPT,
-};
#endif
static struct dma_ops sh_dmac_ops = {
@@ -244,15 +226,21 @@ static int __init sh_dmac_init(void)
#ifdef CONFIG_CPU_SH4
make_ipr_irq(DMAE_IRQ, DMA_IPR_ADDR, DMA_IPR_POS, DMA_PRIORITY);
-setup_irq(DMAE_IRQ, &irq_err);
+i = request_irq(DMAE_IRQ, dma_err, SA_INTERRUPT, "DMAC Address Error", 0);
+if (i < 0)
+return i;
#endif
-/* Kick the DMAOR */
-sh_dmac->dmaor |= DMAOR_DME /* | 0x200 */ | 0x8000; /* DDT = 1, PR1 = 1, DME = 1 */
-sh_dmac->dmaor &= ~(DMAOR_NMIF | DMAOR_AE);
-for (i = 0; i < MAX_DMAC_CHANNELS; i++)
+for (i = 0; i < MAX_DMAC_CHANNELS; i++) {
+int irq = get_dmte_irq(i);
+make_ipr_irq(irq, DMA_IPR_ADDR, DMA_IPR_POS, DMA_PRIORITY);
dma_info[i].ops = &sh_dmac_ops;
+dma_info[i].tei_capable = 1;
+}
+sh_dmac->dmaor |= 0x8000 | DMAOR_DME;
return register_dmac(&sh_dmac_ops);
}
...
@@ -24,7 +24,6 @@
#define DM_DEC 0x00008000
#define SM_INC 0x00001000
#define SM_DEC 0x00002000
-#define RS_DUAL 0x00000000
#define RS_IN 0x00000200
#define RS_OUT 0x00000300
#define TM_BURST 0x0000080
@@ -37,6 +36,11 @@
#define CHCR_TE 0x00000002
#define CHCR_IE 0x00000004
+/* Define the default configuration for dual address memory-memory transfer.
+ * The 0x400 value represents auto-request, external->external.
+ */
+#define RS_DUAL (DM_INC | SM_INC | 0x400 | TS_32)
#define DMAOR_COD 0x00000008
#define DMAOR_AE 0x00000004
#define DMAOR_NMIF 0x00000002
...
@@ -14,6 +14,7 @@ config PCI
config SH_PCIDMA_NONCOHERENT
bool "Cache and PCI noncoherent"
depends on PCI
+default y
help
Enable this option if your platform does not have a CPU cache which
remains coherent with PCI DMA. It is safest to say 'Y', although you
@@ -39,8 +40,3 @@ config PCI_AUTO_UPDATE_RESOURCES
with its resources updated beyond what they are when the device
is powered up, set this to N. Everyone else will want this as Y.
-config PCI_DMA
-bool
-depends on PCI
-default y if !SH_DREAMCAST
@@ -4,7 +4,6 @@
obj-y += pci.o
obj-$(CONFIG_PCI_AUTO) += pci-auto.o
-obj-$(CONFIG_PCI_DMA) += pci-dma.o
obj-$(CONFIG_CPU_SUBTYPE_ST40STB1) += pci-st40.o
obj-$(CONFIG_CPU_SUBTYPE_SH7751) += pci-sh7751.o
...
@@ -30,7 +30,7 @@
static int gapspci_dma_used = 0;
-void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+void *__pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
dma_addr_t * dma_handle)
{
unsigned long buf;
@@ -52,7 +52,7 @@ void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
return (void *)buf;
}
-void pci_free_consistent(struct pci_dev *hwdev, size_t size,
+void __pci_free_consistent(struct pci_dev *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
/* XXX */
...
/*
* Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
*
* May be copied or modified under the terms of the GNU General Public
* License. See linux/COPYING for more information.
*
* Dynamic DMA mapping support.
*/
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <asm/io.h>
#include <asm/addrspace.h>
void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
dma_addr_t * dma_handle)
{
void *ret;
int gfp = GFP_ATOMIC;
ret = (void *) __get_free_pages(gfp, get_order(size));
if (ret != NULL) {
/* Is it necessary to do the memset? */
memset(ret, 0, size);
*dma_handle = virt_to_phys(ret);
}
/* We must flush the cache before we pass it on to the device */
dma_cache_wback_inv(ret, size);
return P2SEGADDR(ret);
}
void pci_free_consistent(struct pci_dev *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
unsigned long p1addr=P1SEGADDR((unsigned long)vaddr);
free_pages(p1addr, get_order(size));
}
/*
* arch/sh/mm/consistent.c
*
* Copyright (C) 2004 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle)
{
struct page *page, *end, *free;
void *ret;
int order;
size = PAGE_ALIGN(size);
order = get_order(size);
page = alloc_pages(gfp, order);
if (!page)
return NULL;
ret = (void *)P2SEGADDR(page_to_bus(page));
/*
* We must flush the cache before we pass it on to the device
*/
dma_cache_wback_inv(ret, size);
*handle = (unsigned long)ret;
free = page + (size >> PAGE_SHIFT);
end = page + (1 << order);
do {
set_page_count(page, 1);
page++;
} while (size -= PAGE_SIZE);
/*
* Free any unused pages
*/
while (page < end) {
set_page_count(page, 1);
__free_page(page);
page++;
}
return ret;
}
void consistent_free(void *vaddr, size_t size)
{
unsigned long addr = P1SEGADDR((unsigned long)vaddr);
free_pages(addr, get_order(size));
}
void consistent_sync(void *vaddr, size_t size, int direction)
{
switch (direction) {
case DMA_FROM_DEVICE: /* invalidate only */
dma_cache_inv(vaddr, size);
break;
case DMA_TO_DEVICE: /* writeback only */
dma_cache_wback(vaddr, size);
break;
case DMA_BIDIRECTIONAL: /* writeback and invalidate */
dma_cache_wback_inv(vaddr, size);
break;
default:
BUG();
}
}
@@ -21,7 +21,7 @@
#include <asm/io.h>
/* Channel to use for page ops, must be dual-address mode capable. */
-static int dma_channel = 3;
+static int dma_channel = CONFIG_DMA_PAGE_OPS_CHANNEL;
static void copy_page_dma(void *to, void *from)
{
...
#ifndef __ASM_CPU_SH4_DMA_H
#define __ASM_CPU_SH4_DMA_H
-#define SH_DMAC_BASE 0xbfa00000
+#define SH_DMAC_BASE 0xffa00000
#endif /* __ASM_CPU_SH4_DMA_H */
-#include <asm-generic/dma-mapping.h>
+#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <asm/scatterlist.h>
#include <asm/io.h>
/* arch/sh/mm/consistent.c */
extern void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle);
extern void consistent_free(void *vaddr, size_t size);
extern void consistent_sync(void *vaddr, size_t size, int direction);
#ifdef CONFIG_SH_DREAMCAST
struct pci_dev;
extern struct bus_type pci_bus_type;
extern void *__pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
dma_addr_t *dma_handle);
extern void __pci_free_consistent(struct pci_dev *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle);
#endif
#define dma_supported(dev, mask) (1)
static inline int dma_set_mask(struct device *dev, u64 mask)
{
if (!dev->dma_mask || !dma_supported(dev, mask))
return -EIO;
*dev->dma_mask = mask;
return 0;
}
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, int flag)
{
/*
* Some platforms have special pci_alloc_consistent() implementations,
* in these instances we can't use the generic consistent_alloc().
*/
#ifdef CONFIG_SH_DREAMCAST
if (dev && dev->bus == &pci_bus_type)
return __pci_alloc_consistent(NULL, size, dma_handle);
#endif
return consistent_alloc(flag, size, dma_handle);
}
static inline void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
/*
* Same note as above applies to pci_free_consistent()..
*/
#ifdef CONFIG_SH_DREAMCAST
if (dev && dev->bus == &pci_bus_type) {
__pci_free_consistent(NULL, size, vaddr, dma_handle);
return;
}
#endif
consistent_free(vaddr, size);
}
static inline void dma_cache_sync(void *vaddr, size_t size,
enum dma_data_direction dir)
{
consistent_sync(vaddr, size, (int)dir);
}
static inline dma_addr_t dma_map_single(struct device *dev,
void *ptr, size_t size,
enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
if (dev->bus == &pci_bus_type)
return virt_to_bus(ptr);
#endif
dma_cache_sync(ptr, size, dir);
return virt_to_bus(ptr);
}
#define dma_unmap_single(dev, addr, size, dir) do { } while (0)
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
int i;
for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
sg[i].length, dir);
#endif
sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
}
return nents;
}
#define dma_unmap_sg(dev, sg, nents, dir) do { } while (0)
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir)
{
return dma_map_single(dev, page_address(page) + offset, size, dir);
}
static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
size_t size, enum dma_data_direction dir)
{
dma_unmap_single(dev, dma_address, size, dir);
}
static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
if (dev->bus == &pci_bus_type)
return;
#endif
dma_cache_sync(bus_to_virt(dma_handle), size, dir);
}
static inline void dma_sync_single_range(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset, size_t size,
enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
if (dev->bus == &pci_bus_type)
return;
#endif
dma_cache_sync(bus_to_virt(dma_handle) + offset, size, dir);
}
static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir)
{
int i;
for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
sg[i].length, dir);
#endif
sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
}
}
static inline int dma_get_cache_alignment(void)
{
/*
* Each processor family will define its own L1_CACHE_SHIFT,
* L1_CACHE_BYTES wraps to this, so this is always safe.
*/
return L1_CACHE_BYTES;
}
#endif /* __ASM_SH_DMA_MAPPING_H */
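
To illustrate the scatterlist path above, here is a hypothetical sketch of a caller building a two-entry scatterlist by hand (on this kernel generation the page/offset/length fields are filled in directly, exactly as dma_map_sg() above consumes them) and mapping it for a device. The example_* names and sizes are placeholders, not part of the patch.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <asm/scatterlist.h>

static void example_map_two_buffers(struct device *dev,
				    struct page *pg0, struct page *pg1)
{
	struct scatterlist sg[2];
	int nents;

	memset(sg, 0, sizeof(sg));
	sg[0].page   = pg0;		/* whole first page */
	sg[0].offset = 0;
	sg[0].length = PAGE_SIZE;
	sg[1].page   = pg1;		/* first 512 bytes of the second page */
	sg[1].offset = 0;
	sg[1].length = 512;

	/* Writes back/invalidates each buffer on noncoherent configurations
	 * and fills in sg[i].dma_address for the device. */
	nents = dma_map_sg(dev, sg, 2, DMA_TO_DEVICE);

	/* ... program the device with sg[i].dma_address / sg[i].length ... */

	/* Re-sync if the CPU touched the buffers before another transfer. */
	dma_sync_sg(dev, sg, nents, DMA_TO_DEVICE);
	dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
}
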
@@ -12,6 +12,7 @@
#include <linux/config.h>
#include <linux/spinlock.h>
+#include <linux/wait.h>
#include <asm/cpu/dma.h>
#include <asm/semaphore.h>
@@ -63,11 +64,13 @@ struct dma_info {
unsigned long dar;
unsigned int configured:1;
+unsigned int tei_capable:1;
atomic_t busy;
struct semaphore sem;
+wait_queue_head_t wait_queue;
struct dma_ops *ops;
-} __attribute__ ((packed));
+};
/* arch/sh/drivers/dma/dma-api.c */
extern int dma_xfer(unsigned int chan, unsigned long from,
...
@@ -3,9 +3,7 @@
#ifdef __KERNEL__
-#include <linux/config.h>
-#include <linux/mm.h> /* for struct page */
-#include <asm/cacheflush.h>
+#include <linux/dma-mapping.h>
/* Can be used to override the logic in pci_scan_bus for skipping
already-configured bus numbers - to be used for buggy BIOSes
@@ -59,45 +57,6 @@ static inline void pcibios_penalize_isa_irq(int irq)
#include <linux/string.h>
#include <asm/io.h>
/* Allocate and map kernel buffer using consistent mode DMA for a device.
* hwdev should be valid struct pci_dev pointer for PCI devices,
* NULL for PCI-like buses (ISA, EISA).
* Returns non-NULL cpu-view pointer to the buffer if successful and
* sets *dma_addrp to the pci side dma address as well, else *dma_addrp
* is undefined.
*/
extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
dma_addr_t *dma_handle);
/* Free and unmap a consistent DMA buffer.
* cpu_addr is what was returned from pci_alloc_consistent,
* size must be the same as what as passed into pci_alloc_consistent,
* and likewise dma_addr must be the same as what *dma_addrp was set to.
*
* References to the memory and mappings associated with cpu_addr/dma_addr
* past this call are illegal.
*/
extern void pci_free_consistent(struct pci_dev *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle);
/* Map a single buffer of the indicated size for DMA in streaming mode.
* The 32-bit bus address to use is returned.
*
* Once the device is given the dma address, the device owns this memory
* until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed.
*/
static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
size_t size, int direction)
{
if (direction == PCI_DMA_NONE)
BUG();
#ifdef CONFIG_SH_PCIDMA_NONCOHERENT
dma_cache_wback_inv(ptr, size);
#endif
return virt_to_bus(ptr);
}
/* pci_unmap_{single,page} being a nop depends upon the
* configuration.
*/
@@ -123,134 +82,6 @@ static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
#endif
/* Unmap a single streaming mode DMA translation. The dma_addr and size
* must match what was provided for in a previous pci_map_single call. All
* other usages are undefined.
*
* After this call, reads by the cpu to the buffer are guaranteed to see
* whatever the device wrote there.
*/
static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
size_t size,int direction)
{
/* Nothing to do */
}
/* Map a set of buffers described by scatterlist in streaming
* mode for DMA. This is the scather-gather version of the
* above pci_map_single interface. Here the scatter gather list
* elements are each tagged with the appropriate dma address
* and length. They are obtained via sg_dma_{address,length}(SG).
*
* NOTE: An implementation may be able to use a smaller number of
* DMA address/length pairs than there are SG table elements.
* (for example via virtual mapping capabilities)
* The routine returns the number of addr/length pairs actually
* used, at most nents.
*
* Device ownership issues as mentioned above for pci_map_single are
* the same here.
*/
static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
int nents, int direction)
{
#ifdef CONFIG_SH_PCIDMA_NONCOHERENT
int i;
for (i=0; i<nents; i++) {
dma_cache_wback_inv(page_address(sg[i].page) + sg[i].offset, sg[i].length);
sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
}
#endif
if (direction == PCI_DMA_NONE)
BUG();
return nents;
}
/* Unmap a set of streaming mode DMA translations.
* Again, cpu read rules concerning calls here are the same as for
* pci_unmap_single() above.
*/
static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
int nents, int direction)
{
/* Nothing to do */
}
/* Make physical memory consistent for a single
* streaming mode DMA translation after a transfer.
*
* If you perform a pci_map_single() but wish to interrogate the
* buffer using the cpu, yet do not wish to teardown the PCI dma
* mapping, you must call this function before doing so. At the
* next point you give the PCI dma address back to the card, you
* must first perform a pci_dma_sync_for_device, and then the device
* again owns the buffer.
*/
static inline void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev,
dma_addr_t dma_handle,
size_t size, int direction)
{
if (direction == PCI_DMA_NONE)
BUG();
}
static inline void pci_dma_sync_single_for_device(struct pci_dev *hwdev,
dma_addr_t dma_handle,
size_t size, int direction)
{
if (direction == PCI_DMA_NONE)
BUG();
#ifdef CONFIG_SH_PCIDMA_NONCOHERENT
dma_cache_wback_inv(bus_to_virt(dma_handle), size);
#endif
}
/* Make physical memory consistent for a set of streaming
* mode DMA translations after a transfer.
*
* The same as pci_dma_sync_single_* but for a scatter-gather list,
* same rules and usage.
*/
static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev,
struct scatterlist *sg,
int nelems, int direction)
{
if (direction == PCI_DMA_NONE)
BUG();
}
static inline void pci_dma_sync_sg_for_device(struct pci_dev *hwdev,
struct scatterlist *sg,
int nelems, int direction)
{
if (direction == PCI_DMA_NONE)
BUG();
#ifdef CONFIG_SH_PCIDMA_NONCOHERENT
int i;
for (i=0; i<nelems; i++) {
dma_cache_wback_inv(page_address(sg[i].page) + sg[i].offset, sg[i].length);
sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
}
#endif
}
/* Return whether the given PCI device DMA address mask can
* be supported properly. For example, if your device can
* only drive the low 24-bits during PCI bus mastering, then
* you would pass 0x00ffffff as the mask to this function.
*/
static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
{
return 1;
}
/* Not supporting more than 32-bit PCI bus addresses now, but
* must satisfy references to this function. Change if needed.
*/
@@ -282,5 +113,8 @@ static inline void pcibios_add_platform_entries(struct pci_dev *dev)
/* generic pci stuff */
#include <asm-generic/pci.h>
+/* generic DMA-mapping stuff */
+#include <asm-generic/pci-dma-compat.h>
#endif /* __ASM_SH_PCI_H */