Commit b0ac3ca4 authored by Pete Zaitcev, committed by David S. Miller

[SPARC]: The iommu rewrite.

parent e6e88dfa
@@ -61,13 +61,6 @@ static struct resource _sparc_dvma = {
"sparc_iomap", IOBASE_VADDR, IOBASE_END - 1
};
/*
* BTFIXUP would do as well but it seems overkill for the case.
*/
static void (*_sparc_mapioaddr)(unsigned long pa, unsigned long va,
int bus, int ro);
static void (*_sparc_unmapioaddr)(unsigned long va);
/*
* Our mini-allocator...
* Boy this is gross! We need it because we must map I/O for
@@ -201,8 +194,6 @@ static void *
_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz)
{
unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);
unsigned long va;
unsigned int psz;
if (allocate_resource(&sparc_iomap, res,
(offset + sz + PAGE_SIZE-1) & PAGE_MASK,
@@ -213,27 +204,10 @@ _sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz)
prom_halt();
}
va = res->start;
pa &= PAGE_MASK;
for (psz = res->end - res->start + 1; psz != 0; psz -= PAGE_SIZE) {
(*_sparc_mapioaddr)(pa, va, bus, 0);
va += PAGE_SIZE;
pa += PAGE_SIZE;
}
sparc_mapiorange(bus, pa, res->start, res->end - res->start + 1);
/*
* XXX Playing with implementation details here.
* On sparc64 Ebus has resources with precise boundaries.
* We share drivers with sparc64. Too clever drivers use
* start of a resource instead of a base address.
*
* XXX-2 This may be not valid anymore, clean when
* interface to sbus_ioremap() is resolved.
*/
res->start += offset;
res->end = res->start + sz - 1; /* not strictly necessary.. */
return (void *) res->start;
return (void *) (res->start + offset);
}
/*
@@ -244,12 +218,8 @@ static void _sparc_free_io(struct resource *res)
unsigned long plen;
plen = res->end - res->start + 1;
plen = (plen + PAGE_SIZE-1) & PAGE_MASK;
while (plen != 0) {
plen -= PAGE_SIZE;
(*_sparc_unmapioaddr)(res->start + plen);
}
if ((plen & (PAGE_SIZE-1)) != 0) BUG();
sparc_unmapiorange(res->start, plen);
release_resource(res);
}
@@ -283,40 +253,44 @@ void *sbus_alloc_consistent(struct sbus_dev *sdev, long len, u32 *dma_addrp)
}
order = get_order(len_total);
va = __get_free_pages(GFP_KERNEL, order);
if (va == 0) {
/*
* printk here may be flooding... Consider removal XXX.
*/
printk("sbus_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
return NULL;
}
if ((va = __get_free_pages(GFP_KERNEL, order)) == 0)
goto err_nopages;
if ((res = kmalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
free_pages(va, order);
printk("sbus_alloc_consistent: no core\n");
return NULL;
}
if ((res = kmalloc(sizeof(struct resource), GFP_KERNEL)) == NULL)
goto err_nomem;
memset((char*)res, 0, sizeof(struct resource));
if (allocate_resource(&_sparc_dvma, res, len_total,
_sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
free_pages(va, order);
kfree(res);
return NULL;
goto err_nova;
}
mmu_inval_dma_area(va, len_total);
// XXX The mmu_map_dma_area does this for us below, see comments.
// sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
/*
* XXX That's where sdev would be used. Currently we load
* all iommu tables with the same translations.
*/
if (mmu_map_dma_area(dma_addrp, va, res->start, len_total) != 0)
goto err_noiommu;
mmu_map_dma_area(va, res->start, len_total);
*dma_addrp = res->start;
return (void *)res->start;
err_noiommu:
release_resource(res);
err_nova:
free_pages(va, order);
err_nomem:
kfree(res);
err_nopages:
return NULL;
}
void sbus_free_consistent(struct sbus_dev *sdev, long n, void *p, u32 ba)
{
struct resource *res;
unsigned long pgp;
struct page *pgv;
if ((res = _sparc_find_resource(&_sparc_dvma,
(unsigned long)p)) == NULL) {
@@ -340,10 +314,10 @@ void sbus_free_consistent(struct sbus_dev *sdev, long n, void *p, u32 ba)
kfree(res);
/* mmu_inval_dma_area(va, n); */ /* it's consistent, isn't it */
pgp = (unsigned long) phys_to_virt(mmu_translate_dvma(ba));
pgv = mmu_translate_dvma(ba);
mmu_unmap_dma_area(ba, n);
free_pages(pgp, get_order(n));
__free_pages(pgv, get_order(n));
}
/*
@@ -353,39 +327,6 @@ void sbus_free_consistent(struct sbus_dev *sdev, long n, void *p, u32 ba)
*/
dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *va, size_t len, int direction)
{
#if 0 /* This is the version that abuses consistent space */
unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
struct resource *res;
/* XXX why are some lengths signed, others unsigned? */
if (len <= 0) {
return 0;
}
/* XXX So what is maxphys for us and how do drivers know it? */
if (len > 256*1024) { /* __get_free_pages() limit */
return 0;
}
if ((res = kmalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
printk("sbus_map_single: no core\n");
return 0;
}
memset((char*)res, 0, sizeof(struct resource));
res->name = va; /* XXX */
if (allocate_resource(&_sparc_dvma, res, len_total,
_sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE) != 0) {
printk("sbus_map_single: cannot occupy 0x%lx", len);
kfree(res);
return 0;
}
mmu_map_dma_area(va, res->start, len_total);
mmu_flush_dma_area((unsigned long)va, len_total); /* in all contexts? */
return res->start;
#endif
#if 1 /* "trampoline" version */
/* XXX why are some lengths signed, others unsigned? */
if (len <= 0) {
return 0;
@@ -395,36 +336,11 @@ dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *va, size_t len, int dire
return 0;
}
return mmu_get_scsi_one(va, len, sdev->bus);
#endif
}
void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t ba, size_t n, int direction)
{
#if 0 /* This is the version that abuses consistent space */
struct resource *res;
unsigned long va;
if ((res = _sparc_find_resource(&_sparc_dvma, ba)) == NULL) {
printk("sbus_unmap_single: cannot find %08x\n", (unsigned)ba);
return;
}
n = (n + PAGE_SIZE-1) & PAGE_MASK;
if ((res->end-res->start)+1 != n) {
printk("sbus_unmap_single: region 0x%lx asked 0x%lx\n",
(long)((res->end-res->start)+1), n);
return;
}
va = (unsigned long) res->name; /* XXX Ouch */
mmu_inval_dma_area(va, n); /* in all contexts, mm's?... */
mmu_unmap_dma_area(ba, n); /* iounit cache flush is here */
release_resource(res);
kfree(res);
#endif
#if 1 /* "trampoline" version */
mmu_release_scsi_one(ba, n, sdev->bus);
#endif
}
int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
@@ -456,7 +372,7 @@ void sbus_dma_sync_single(struct sbus_dev *sdev, dma_addr_t ba, size_t size, int
if (res == NULL)
panic("sbus_dma_sync_single: 0x%x\n", ba);
va = (unsigned long) phys_to_virt(mmu_translate_dvma(ba));
va = page_address(mmu_translate_dvma(ba)); /* XXX highmem */
/*
* XXX This bogosity will be fixed with the iommu rewrite coming soon
* to a kernel near you. - Anton
@@ -511,24 +427,12 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
kfree(res);
return NULL;
}
mmu_inval_dma_area(va, len_total);
#if 0
/* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %x\n",
/* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n",
(long)va, (long)res->start, (long)virt_to_phys(va), len_total);
#endif
{
unsigned long xva, xpa;
xva = res->start;
xpa = virt_to_phys(va);
while (len_total != 0) {
len_total -= PAGE_SIZE;
(*_sparc_mapioaddr)(xpa, xva, 0, 0);
xva += PAGE_SIZE;
xpa += PAGE_SIZE;
}
}
sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
return (void *) res->start;
@@ -567,12 +471,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
pgp = (unsigned long) phys_to_virt(ba); /* bus_to_virt actually */
mmu_inval_dma_area(pgp, n);
{
int x;
for (x = 0; x < n; x += PAGE_SIZE) {
(*_sparc_unmapioaddr)((unsigned long)p + n);
}
}
sparc_unmapiorange((unsigned long)p, n);
release_resource(res);
kfree(res);
@@ -751,37 +650,6 @@ _sparc_find_resource(struct resource *root, unsigned long hit)
return NULL;
}
/*
* Necessary boot time initializations.
*/
void ioport_init(void)
{
extern void sun4c_mapioaddr(unsigned long, unsigned long, int, int);
extern void srmmu_mapioaddr(unsigned long, unsigned long, int, int);
extern void sun4c_unmapioaddr(unsigned long);
extern void srmmu_unmapioaddr(unsigned long);
switch(sparc_cpu_model) {
case sun4c:
case sun4:
case sun4e:
_sparc_mapioaddr = sun4c_mapioaddr;
_sparc_unmapioaddr = sun4c_unmapioaddr;
break;
case sun4m:
case sun4d:
_sparc_mapioaddr = srmmu_mapioaddr;
_sparc_unmapioaddr = srmmu_unmapioaddr;
break;
default:
printk("ioport_init: cpu type %d is unknown.\n",
sparc_cpu_model);
prom_halt();
};
}
void register_proc_sparc_ioport(void)
{
#ifdef CONFIG_PROC_FS
......
@@ -10,4 +10,4 @@ obj-y := mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o memcpy.o memset.o \
strlen.o checksum.o blockops.o memscan.o memcmp.o strncmp.o \
strncpy_from_user.o divdi3.o udivdi3.o strlen_user.o \
copy_user.o locks.o atomic.o bitops.o debuglocks.o lshrdi3.o \
ashldi3.o rwsem.o muldi3.o
ashldi3.o rwsem.o muldi3.o bitext.o
/*
* bitext.c: kernel little helper (of bit shuffling variety).
*
* Copyright (C) 2002 Pete Zaitcev <zaitcev@yahoo.com>
*
* The algorithm to search a zero bit string is geared towards its application.
* We expect a couple of fixed sizes of requests, so a rotating counter, reset
* by align size, should provide fast enough search while maintaining low
* fragmentation.
*/
#include <linux/smp_lock.h>
#include <asm/bitext.h>
#include <asm/bitops.h>
/**
* bit_map_string_get - find and set a bit string in bit map.
* @t: the bit map.
* @len: requested string length
* @align: requested alignment
*
* Returns offset in the map or -1 if out of space.
*
* Not safe to call from an interrupt (uses spin_lock).
*/
int bit_map_string_get(struct bit_map *t, int len, int align)
{
int offset, count; /* siamese twins */
int off_new;
int align1;
int i;
if (align == 0)
align = 1;
align1 = align - 1;
if ((align & align1) != 0)
BUG();
if (align < 0 || align >= t->size)
BUG();
if (len <= 0 || len > t->size)
BUG();
spin_lock(&t->lock);
offset = t->last_off & ~align1;
count = 0;
for (;;) {
off_new = find_next_zero_bit(t->map, t->size, offset);
off_new = (off_new + align1) & ~align1;
count += off_new - offset;
offset = off_new;
if (offset >= t->size)
offset = 0;
if (count + len > t->size) {
spin_unlock(&t->lock);
/* P3 */ printk(KERN_ERR
"bitmap out: size %d used %d off %d len %d align %d count %d\n",
t->size, t->used, offset, len, align, count);
return -1;
}
if (offset + len > t->size) {
count += t->size - offset;
offset = 0;
continue;
}
i = 0;
while (test_bit(offset + i, t->map) == 0) {
i++;
if (i == len) {
for (i = 0; i < len; i++)
__set_bit(offset + i, t->map);
if ((t->last_off = offset + len) >= t->size)
t->last_off = 0;
t->used += len;
spin_unlock(&t->lock);
return offset;
}
}
count += i + 1;
if ((offset += i + 1) >= t->size)
offset = 0;
}
}
void bit_map_clear(struct bit_map *t, int offset, int len)
{
int i;
if (t->used < len)
BUG(); /* Much too late to do any good, but alas... */
spin_lock(&t->lock);
for (i = 0; i < len; i++) {
if (test_bit(offset + i, t->map) == 0)
BUG();
__clear_bit(offset + i, t->map);
}
t->used -= len;
spin_unlock(&t->lock);
}
void bit_map_init(struct bit_map *t, unsigned long *map, int size)
{
if ((size & 07) != 0)
BUG();
memset(map, 0, size>>3);
memset(t, 0, sizeof *t);
spin_lock_init(&t->lock);
t->map = map;
t->size = size;
}
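A minimal usage sketch of the bit_map API above (not part of the commit; the sizes are invented for illustration, and bit_map_init() requires the size in bits to be a multiple of 8):

#include <linux/kernel.h>
#include <asm/bitext.h>

static unsigned long demo_bits[512 / BITS_PER_LONG];
static struct bit_map demo_map;

static void bit_map_demo(void)
{
	int off;

	bit_map_init(&demo_map, demo_bits, 512);

	/* Grab a run of 64 bits aligned to 16 bits. */
	off = bit_map_string_get(&demo_map, 64, 16);
	if (off == -1)
		return;		/* out of space */

	/* ... bits [off, off + 64) now belong to the caller ... */

	bit_map_clear(&demo_map, off, 64);
}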
@@ -88,8 +88,6 @@ void show_mem(void)
#endif
}
extern pgprot_t protection_map[16];
void __init sparc_context_init(int numctx)
{
int ctx;
......
@@ -176,13 +176,15 @@ static void iounit_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_
}
#ifdef CONFIG_SBUS
static void iounit_map_dma_area(unsigned long va, __u32 addr, int len)
static int iounit_map_dma_area(dma_addr_t *pba, unsigned long va, __u32 addr, int len)
{
unsigned long page, end;
pgprot_t dvma_prot;
iopte_t *iopte;
struct sbus_bus *sbus;
*pba = addr;
dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
end = PAGE_ALIGN((addr + len));
while(addr < end) {
@@ -213,6 +215,8 @@ static void iounit_map_dma_area(unsigned long va, __u32 addr, int len)
}
flush_cache_all();
flush_tlb_all();
return 0;
}
static void iounit_unmap_dma_area(unsigned long addr, int len)
@@ -221,7 +225,7 @@ static void iounit_unmap_dma_area(unsigned long addr, int len)
}
/* XXX We do not pass sbus device here, bad. */
static unsigned long iounit_translate_dvma(unsigned long addr)
static struct page *iounit_translate_dvma(unsigned long addr)
{
struct sbus_bus *sbus = sbus_root; /* They are all the same */
struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
@@ -230,7 +234,7 @@ static unsigned long iounit_translate_dvma(unsigned long addr)
i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
iopte = (iopte_t *)(iounit->page_table + i);
return (iopte_val(*iopte) & 0xFFFFFFF0) << 4; /* XXX sun4d guru, help */
return pfn_to_page(iopte_val(*iopte) >> (PAGE_SHIFT-4)); /* XXX sun4d guru, help */
}
#endif
......
This diff is collapsed.
@@ -26,7 +26,6 @@ unsigned int pg_iobits;
extern void ld_mmu_sun4c(void);
extern void ld_mmu_srmmu(void);
extern void ioport_init(void);
void __init load_mmu(void)
{
@@ -44,5 +43,4 @@ void __init load_mmu(void)
prom_halt();
}
btfixup();
ioport_init();
}
@@ -21,6 +21,7 @@
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <asm/bitext.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
@@ -137,29 +138,26 @@ static inline int srmmu_device_memory(unsigned long x)
int srmmu_cache_pagetables;
/* these will be initialized in srmmu_nocache_calcsize() */
int srmmu_nocache_npages;
unsigned long srmmu_nocache_size;
unsigned long srmmu_nocache_end;
unsigned long pkmap_base;
unsigned long pkmap_base_end;
unsigned long srmmu_nocache_bitmap_size;
extern unsigned long fix_kmap_begin;
extern unsigned long fix_kmap_end;
/* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)
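A quick check on that ratio, assuming the usual 4K pages (PAGE_SHIFT == 12) and 4-byte SRMMU PTEs:

/*
 * SRMMU_NOCACHE_BITMAP_SHIFT = 12 - 4 = 8, so one bitmap bit covers
 * 1 << 8 = 256 bytes of pool, and 256 bytes / 4 bytes per PTE = 64 PTEs.
 */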
void *srmmu_nocache_pool;
void *srmmu_nocache_bitmap;
int srmmu_nocache_low;
int srmmu_nocache_used;
static spinlock_t srmmu_nocache_spinlock = SPIN_LOCK_UNLOCKED;
static struct bit_map srmmu_nocache_map;
/* This makes sense. Honest it does - Anton */
#define __nocache_pa(VADDR) (((unsigned long)VADDR) - SRMMU_NOCACHE_VADDR + __pa((unsigned long)srmmu_nocache_pool))
#define __nocache_va(PADDR) (__va((unsigned long)PADDR) - (unsigned long)srmmu_nocache_pool + SRMMU_NOCACHE_VADDR)
#define __nocache_fix(VADDR) __va(__nocache_pa(VADDR))
static inline unsigned long srmmu_pte_pfn(pte_t pte)
static unsigned long srmmu_pte_pfn(pte_t pte)
{
if (srmmu_device_memory(pte_val(pte))) {
/* XXX Anton obviously had something in mind when he did this.
@@ -219,15 +217,6 @@ static inline int srmmu_pgd_present(pgd_t pgd)
static inline void srmmu_pgd_clear(pgd_t * pgdp)
{ srmmu_set_pte((pte_t *)pgdp, __pte(0)); }
static inline int srmmu_pte_write(pte_t pte)
{ return pte_val(pte) & SRMMU_WRITE; }
static inline int srmmu_pte_dirty(pte_t pte)
{ return pte_val(pte) & SRMMU_DIRTY; }
static inline int srmmu_pte_young(pte_t pte)
{ return pte_val(pte) & SRMMU_REF; }
static inline pte_t srmmu_pte_wrprotect(pte_t pte)
{ return __pte(pte_val(pte) & ~SRMMU_WRITE);}
@@ -321,10 +310,7 @@ static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
*/
static unsigned long __srmmu_get_nocache(int size, int align)
{
int offset = srmmu_nocache_low;
int i;
unsigned long va_tmp, phys_tmp;
int lowest_failed = 0;
int offset;
if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
printk("Size 0x%x too small for nocache request\n", size);
@@ -334,49 +320,20 @@ static unsigned long __srmmu_get_nocache(int size, int align)
printk("Size 0x%x unaligned int nocache request\n", size);
size += SRMMU_NOCACHE_BITMAP_SHIFT-1;
}
size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;
spin_lock(&srmmu_nocache_spinlock);
repeat:
offset = find_next_zero_bit(srmmu_nocache_bitmap, srmmu_nocache_bitmap_size, offset);
/* we align on physical address */
if (align) {
va_tmp = (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT));
phys_tmp = (__nocache_pa(va_tmp) + align - 1) & ~(align - 1);
va_tmp = (unsigned long)__nocache_va(phys_tmp);
offset = (va_tmp - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
}
if ((srmmu_nocache_bitmap_size - offset) < size) {
printk("Run out of nocached RAM!\n");
spin_unlock(&srmmu_nocache_spinlock);
offset = bit_map_string_get(&srmmu_nocache_map,
size >> SRMMU_NOCACHE_BITMAP_SHIFT,
align >> SRMMU_NOCACHE_BITMAP_SHIFT);
/* P3 */ /* printk("srmmu: get size %d align %d, got %d (0x%x)\n",
size >> SRMMU_NOCACHE_BITMAP_SHIFT, align >> SRMMU_NOCACHE_BITMAP_SHIFT,
offset, offset); */
if (offset == -1) {
printk("srmmu: out of nocache %d: %d/%d\n",
size, (int) srmmu_nocache_size,
srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
return 0;
}
i = 0;
while(i < size) {
if (test_bit(offset + i, srmmu_nocache_bitmap)) {
lowest_failed = 1;
offset = offset + i + 1;
goto repeat;
}
i++;
}
i = 0;
while(i < size) {
set_bit(offset + i, srmmu_nocache_bitmap);
i++;
srmmu_nocache_used++;
}
if (!lowest_failed && ((align >> SRMMU_NOCACHE_BITMAP_SHIFT) <= 1) && (offset > srmmu_nocache_low))
srmmu_nocache_low = offset;
spin_unlock(&srmmu_nocache_spinlock);
return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT));
}
@@ -422,63 +379,57 @@ void srmmu_free_nocache(unsigned long vaddr, int size)
offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;
spin_lock(&srmmu_nocache_spinlock);
while(size--) {
clear_bit(offset + size, srmmu_nocache_bitmap);
srmmu_nocache_used--;
}
if (offset < srmmu_nocache_low)
srmmu_nocache_low = offset;
spin_unlock(&srmmu_nocache_spinlock);
/* P3 */ /* printk("srmmu: free off %d (0x%x) size %d\n", offset, offset, size); */
bit_map_clear(&srmmu_nocache_map, offset, size);
}
void srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned long end);
extern unsigned long probe_memory(void); /* in fault.c */
/* Reserve nocache dynamically proportionally to the amount of
/*
* Reserve nocache dynamically proportionally to the amount of
* system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
*/
void srmmu_nocache_calcsize(void)
{
unsigned long sysmemavail = probe_memory() / 1024;
int srmmu_nocache_npages;
srmmu_nocache_npages =
sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;
if (sysmemavail % (SRMMU_NOCACHE_ALCRATIO * 1024))
srmmu_nocache_npages += 256;
/* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */
// if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256;
if (srmmu_nocache_npages < 550) srmmu_nocache_npages = 550;
/* anything above 1280 blows up */
if (srmmu_nocache_npages > 1280) srmmu_nocache_npages = 1280;
srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
srmmu_nocache_bitmap_size = srmmu_nocache_npages * 16;
srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
fix_kmap_begin = srmmu_nocache_end;
fix_kmap_end = fix_kmap_begin + (KM_TYPE_NR * NR_CPUS - 1) * PAGE_SIZE;
pkmap_base = SRMMU_NOCACHE_VADDR + srmmu_nocache_size + 0x40000;
pkmap_base_end = pkmap_base + LAST_PKMAP * PAGE_SIZE;
/* printk("system memory available = %luk\nnocache ram size = %luk\n",
sysmemavail, srmmu_nocache_size / 1024); */
}
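To make the sizing concrete, a worked example, assuming SRMMU_NOCACHE_ALCRATIO is 64 (i.e. 256 nocache pages per 64MB of system RAM) and 4K pages:

/*
 * 256MB machine: sysmemavail = 262144 KB
 *   srmmu_nocache_npages = 262144 / 64 / 1024 * 256 = 1024 pages (4MB)
 * 128MB machine: the formula yields 512 pages, which the floor above
 *   raises to 550; results over 1280 pages are clamped to 1280 (5MB).
 */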
void srmmu_nocache_init(void)
{
unsigned int bitmap_bits;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
unsigned long paddr, vaddr;
unsigned long pteval;
bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;
srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size, PAGE_SIZE, 0UL);
memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
srmmu_nocache_bitmap = __alloc_bootmem(srmmu_nocache_bitmap_size, SMP_CACHE_BYTES, 0UL);
memset(srmmu_nocache_bitmap, 0, srmmu_nocache_bitmap_size);
srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
srmmu_swapper_pg_dir = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
@@ -486,11 +437,12 @@ void srmmu_nocache_init(void)
srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end);
spin_lock_init(&srmmu_nocache_spinlock);
paddr = __pa((unsigned long)srmmu_nocache_pool);
vaddr = SRMMU_NOCACHE_VADDR;
/* P3 */ printk("srmmu: pool 0x%x vaddr 0x%x bitmap 0x%x bits %d (0x%x)\n",
(int)srmmu_nocache_pool, vaddr, srmmu_nocache_bitmap, bitmap_bits, bitmap_bits);
while (vaddr < srmmu_nocache_end) {
pgd = pgd_offset_k(vaddr);
pmd = srmmu_pmd_offset(__nocache_fix(pgd), vaddr);
@@ -637,7 +589,8 @@ static void srmmu_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
}
/* Low level IO area allocation on the SRMMU. */
void srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly)
static inline void srmmu_mapioaddr(unsigned long physaddr,
unsigned long virt_addr, int bus_type)
{
pgd_t *pgdp;
pmd_t *pmdp;
@@ -656,16 +609,24 @@ void srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_ty
* 36-bit physical address on the I/O space lines...
*/
tmp |= (bus_type << 28);
if(rdonly)
tmp |= SRMMU_PRIV_RDONLY;
else
tmp |= SRMMU_PRIV;
tmp |= SRMMU_PRIV;
__flush_page_to_ram(virt_addr);
srmmu_set_pte(ptep, __pte(tmp));
}
static void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
unsigned long xva, unsigned int len)
{
while (len != 0) {
len -= PAGE_SIZE;
srmmu_mapioaddr(xpa, xva, bus);
xva += PAGE_SIZE;
xpa += PAGE_SIZE;
}
flush_tlb_all();
}
void srmmu_unmapioaddr(unsigned long virt_addr)
static inline void srmmu_unmapioaddr(unsigned long virt_addr)
{
pgd_t *pgdp;
pmd_t *pmdp;
@@ -677,6 +638,15 @@ void srmmu_unmapioaddr(unsigned long virt_addr)
/* No need to flush uncacheable page. */
srmmu_pte_clear(ptep);
}
static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
{
while (len != 0) {
len -= PAGE_SIZE;
srmmu_unmapioaddr(virt_addr);
virt_addr += PAGE_SIZE;
}
flush_tlb_all();
}
@@ -1398,7 +1368,7 @@ static void srmmu_mmu_info(struct seq_file *m)
srmmu_name,
num_contexts,
srmmu_nocache_size,
(srmmu_nocache_used << SRMMU_NOCACHE_BITMAP_SHIFT));
srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
}
static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
@@ -2258,7 +2228,10 @@ void __init ld_mmu_srmmu(void)
BTFIXUPSET_CALL(pte_mkyoung, srmmu_pte_mkyoung, BTFIXUPCALL_ORINT(SRMMU_REF));
BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP);
BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(sparc_mapiorange, srmmu_mapiorange, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(sparc_unmapiorange, srmmu_unmapiorange, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(alloc_thread_info, srmmu_alloc_thread_info, BTFIXUPCALL_NORM);
......
@@ -534,10 +534,13 @@ static inline void sun4c_init_ss2_cache_bug(void)
}
/* Addr is always aligned on a page boundary for us already. */
static void sun4c_map_dma_area(unsigned long va, u32 addr, int len)
static int sun4c_map_dma_area(dma_addr_t *pba, unsigned long va,
unsigned long addr, int len)
{
unsigned long page, end;
*pba = addr;
end = PAGE_ALIGN((addr + len));
while (addr < end) {
page = va;
@@ -550,13 +553,15 @@ static void sun4c_map_dma_area(unsigned long va, u32 addr, int len)
addr += PAGE_SIZE;
va += PAGE_SIZE;
}
return 0;
}
static unsigned long sun4c_translate_dvma(unsigned long busa)
static struct page *sun4c_translate_dvma(unsigned long busa)
{
/* Fortunately for us, bus_addr == uncached_virt in sun4c. */
unsigned long pte = sun4c_get_pte(busa);
return (pte << PAGE_SHIFT) + PAGE_OFFSET;
return pfn_to_page(pte & SUN4C_PFN_MASK);
}
static void sun4c_unmap_dma_area(unsigned long busa, int len)
@@ -1578,21 +1583,33 @@ static void sun4c_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
}
}
void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr,
int bus_type, int rdonly)
static inline void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr)
{
unsigned long page_entry;
page_entry = ((physaddr >> PAGE_SHIFT) & SUN4C_PFN_MASK);
page_entry |= ((pg_iobits | _SUN4C_PAGE_PRIV) & ~(_SUN4C_PAGE_PRESENT));
if (rdonly)
page_entry &= ~_SUN4C_WRITEABLE;
sun4c_put_pte(virt_addr, page_entry);
}
void sun4c_unmapioaddr(unsigned long virt_addr)
static void sun4c_mapiorange(unsigned int bus, unsigned long xpa,
unsigned long xva, unsigned int len)
{
sun4c_put_pte(virt_addr, 0);
while (len != 0) {
len -= PAGE_SIZE;
sun4c_mapioaddr(xpa, xva);
xva += PAGE_SIZE;
xpa += PAGE_SIZE;
}
}
static void sun4c_unmapiorange(unsigned long virt_addr, unsigned int len)
{
while (len != 0) {
len -= PAGE_SIZE;
sun4c_put_pte(virt_addr, 0);
virt_addr += PAGE_SIZE;
}
}
static void sun4c_alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
@@ -1783,7 +1800,7 @@ static pte_t sun4c_pte_mkyoung(pte_t pte)
*/
static pte_t sun4c_mk_pte(struct page *page, pgprot_t pgprot)
{
return __pte((page - mem_map) | pgprot_val(pgprot));
return __pte(page_to_pfn(page) | pgprot_val(pgprot));
}
static pte_t sun4c_mk_pte_phys(unsigned long phys_page, pgprot_t pgprot)
@@ -2225,6 +2242,9 @@ void __init ld_mmu_sun4c(void)
BTFIXUPSET_CALL(mmu_unmap_dma_area, sun4c_unmap_dma_area, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mmu_translate_dvma, sun4c_translate_dvma, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(sparc_mapiorange, sun4c_mapiorange, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(sparc_unmapiorange, sun4c_unmapiorange, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(alloc_thread_info, sun4c_alloc_thread_info, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(free_thread_info, sun4c_free_thread_info, BTFIXUPCALL_NORM);
......
/*
* bitext.h: Bit string operations on the sparc, specific to architecture.
*
* Copyright 2002 Pete Zaitcev <zaitcev@yahoo.com>
*/
#ifndef _SPARC_BITEXT_H
#define _SPARC_BITEXT_H
#include <linux/smp_lock.h>
struct bit_map {
spinlock_t lock;
unsigned long *map;
int size;
int used;
int last_off;
};
extern int bit_map_string_get(struct bit_map *t, int len, int align);
extern void bit_map_clear(struct bit_map *t, int offset, int len);
extern void bit_map_init(struct bit_map *t, unsigned long *map, int size);
#endif /* defined(_SPARC_BITEXT_H) */
@@ -6,6 +6,7 @@
#define _SPARC_IOMMU_H
#include <asm/page.h>
#include <asm/bitext.h>
/* The iommu handles all virtual to physical address translations
* that occur between the SBUS and physical memory. Access by
@@ -100,11 +101,11 @@ struct iommu_regs {
struct iommu_struct {
struct iommu_regs *regs;
iopte_t *page_table;
iopte_t *lowest; /* to speed up searches... */
unsigned long plow;
/* For convenience */
unsigned long start; /* First managed virtual address */
unsigned long end; /* Last managed virtual address */
struct bit_map usemap;
};
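The rewritten arch/sparc/mm/iommu.c is collapsed in this view, so as a hypothetical sketch only, an allocator built on the new usemap field would look roughly like this (the function name and alignment policy are invented, not taken from the hidden hunk):

static u32 iommu_get_dvma(struct iommu_struct *iommu, int npages)
{
	int ioptex;

	/* One usemap bit per IOPTE, i.e. per mapped page. */
	ioptex = bit_map_string_get(&iommu->usemap, npages, 1);
	if (ioptex < 0)
		return 0;	/* DVMA space exhausted */
	return iommu->start + (ioptex << PAGE_SHIFT);
}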
extern __inline__ void iommu_invalidate(struct iommu_regs *regs)
@@ -112,9 +113,9 @@ extern __inline__ void iommu_invalidate(struct iommu_regs *regs)
regs->tlbflush = 0;
}
extern __inline__ void iommu_invalidate_page(struct iommu_regs *regs, unsigned long page)
extern __inline__ void iommu_invalidate_page(struct iommu_regs *regs, unsigned long ba)
{
regs->pageflush = (page & PAGE_MASK);
regs->pageflush = (ba & PAGE_MASK);
}
#endif /* !(_SPARC_IOMMU_H) */
@@ -27,6 +27,7 @@
#ifndef __ASSEMBLY__
struct vm_area_struct;
struct page;
extern void load_mmu(void);
extern unsigned long calc_highpages(void);
@@ -51,15 +52,30 @@ BTFIXUPDEF_CALL(void, mmu_release_scsi_sgl, struct scatterlist *, int, struct s
/*
* mmu_map/unmap are provided by iommu/iounit; Invalid to call on IIep.
*
* The mmu_map_dma_area establishes two mappings in one go.
* These mappings point to pages normally mapped at 'va' (linear address).
* First mapping is for CPU visible address at 'a', uncached.
* This is an alias, but it works because it is an uncached mapping.
* Second mapping is for device visible address, or "bus" address.
* The bus address is returned at '*pba'.
*
* These functions seem distinct, but are hard to split. On sun4c,
at least for now, 'a' is equal to bus address, and returned in *pba.
* On sun4m, page attributes depend on the CPU type, so we have to
* know if we are mapping RAM or I/O, so it has to be an additional argument
* to a separate mapping function for CPU visible mappings.
*/
BTFIXUPDEF_CALL(void, mmu_map_dma_area, unsigned long va, __u32 addr, int len)
BTFIXUPDEF_CALL(unsigned long /*phys*/, mmu_translate_dvma, unsigned long busa)
BTFIXUPDEF_CALL(int, mmu_map_dma_area, dma_addr_t *, unsigned long, unsigned long, int len)
BTFIXUPDEF_CALL(struct page *, mmu_translate_dvma, unsigned long busa)
BTFIXUPDEF_CALL(void, mmu_unmap_dma_area, unsigned long busa, int len)
#define mmu_map_dma_area(va, ba,len) BTFIXUP_CALL(mmu_map_dma_area)(va,ba,len)
#define mmu_map_dma_area(pba,va,a,len) BTFIXUP_CALL(mmu_map_dma_area)(pba,va,a,len)
#define mmu_unmap_dma_area(ba,len) BTFIXUP_CALL(mmu_unmap_dma_area)(ba,len)
#define mmu_translate_dvma(ba) BTFIXUP_CALL(mmu_translate_dvma)(ba)
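Read together with sbus_alloc_consistent earlier in this diff, the calling convention sketches out as follows (a sketch with error handling elided; 'res' is assumed to come from allocate_resource() on _sparc_dvma):

dma_addr_t ba;				/* bus address, set by mmu_map_dma_area */
unsigned long va;			/* cached linear address of real pages */
unsigned long uncached = res->start;	/* uncached CPU-visible alias */

va = __get_free_pages(GFP_KERNEL, order);
if (mmu_map_dma_area(&ba, va, uncached, len_total) != 0)
	goto err;	/* free the pages, release the resource */
/* From here the CPU uses 'uncached' and the device is handed 'ba';
 * mmu_unmap_dma_area(ba, len_total) later tears both mappings down. */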
/*
*/
BTFIXUPDEF_SIMM13(pmd_shift)
BTFIXUPDEF_SETHI(pmd_size)
BTFIXUPDEF_SETHI(pmd_mask)
@@ -377,6 +393,12 @@ BTFIXUPDEF_CALL(void, update_mmu_cache, struct vm_area_struct *, unsigned long,
#define update_mmu_cache(vma,addr,pte) BTFIXUP_CALL(update_mmu_cache)(vma,addr,pte)
BTFIXUPDEF_CALL(void, sparc_mapiorange, unsigned int, unsigned long,
unsigned long, unsigned int)
BTFIXUPDEF_CALL(void, sparc_unmapiorange, unsigned long, unsigned int)
#define sparc_mapiorange(bus,pa,va,len) BTFIXUP_CALL(sparc_mapiorange)(bus,pa,va,len)
#define sparc_unmapiorange(va,len) BTFIXUP_CALL(sparc_unmapiorange)(va,len)
extern int invalid_segment;
/* Encode and de-code a swap entry */
......
@@ -10,7 +10,7 @@
#include <linux/ioport.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
/* #include <asm/iommu.h> */ /* Unused since we use opaque iommu (|io-unit) */
#include <asm/scatterlist.h>
/* We scan which devices are on the SBus using the PROM node device
......