Commit 00b60c8d authored by Todd Poynor, committed by Greg Kroah-Hartman

staging: gasket: pg tbl: remove static function forward declarations

Remove forward declarations of static functions, move code to avoid
forward references, for kernel style.
Signed-off-by: Todd Poynor <toddpoynor@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent d821f8eb
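
For context, a minimal hypothetical sketch (not taken from the gasket driver) of the style change this commit applies: when a static function is defined above its first caller, the forward declaration becomes unnecessary and can be dropped.

/*
 * Before: the helper is defined after its caller, so a forward
 * declaration is required.
 *
 *	static int helper(int x);
 *
 *	int caller(int x)
 *	{
 *		return helper(x) + 1;
 *	}
 *
 *	static int helper(int x)
 *	{
 *		return x * 2;
 *	}
 */

/* After: the definition is moved above its caller; no declaration needed. */
static int helper(int x)
{
	return x * 2;
}

int caller(int x)
{
	return helper(x) + 1;
}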
...@@ -214,71 +214,6 @@ struct gasket_page_table {
struct gasket_coherent_page_entry *coherent_pages;
};
/* Mapping declarations */
static int gasket_map_simple_pages(
struct gasket_page_table *pg_tbl, ulong host_addr,
ulong dev_addr, uint num_pages);
static int gasket_map_extended_pages(
struct gasket_page_table *pg_tbl, ulong host_addr,
ulong dev_addr, uint num_pages);
static int gasket_perform_mapping(
struct gasket_page_table *pg_tbl,
struct gasket_page_table_entry *pte_base, u64 __iomem *att_base,
ulong host_addr, uint num_pages, int is_simple_mapping);
static int gasket_alloc_simple_entries(
struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_pages);
static int gasket_alloc_extended_entries(
struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_entries);
static int gasket_alloc_extended_subtable(
struct gasket_page_table *pg_tbl, struct gasket_page_table_entry *pte,
u64 __iomem *att_reg);
/* Unmapping declarations */
static void gasket_page_table_unmap_nolock(
struct gasket_page_table *pg_tbl, ulong start_addr, uint num_pages);
static void gasket_page_table_unmap_all_nolock(
struct gasket_page_table *pg_tbl);
static void gasket_unmap_simple_pages(
struct gasket_page_table *pg_tbl, ulong start_addr, uint num_pages);
static void gasket_unmap_extended_pages(
struct gasket_page_table *pg_tbl, ulong start_addr, uint num_pages);
static void gasket_perform_unmapping(
struct gasket_page_table *pg_tbl,
struct gasket_page_table_entry *pte_base, u64 __iomem *att_base,
uint num_pages, int is_simple_mapping);
static void gasket_free_extended_subtable(
struct gasket_page_table *pg_tbl, struct gasket_page_table_entry *pte,
u64 __iomem *att_reg);
static bool gasket_release_page(struct page *page);
/* Other/utility declarations */
static inline bool gasket_addr_is_simple(
struct gasket_page_table *pg_tbl, ulong addr);
static bool gasket_is_simple_dev_addr_bad(
struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_pages);
static bool gasket_is_extended_dev_addr_bad(
struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_pages);
static bool gasket_is_pte_range_free(
struct gasket_page_table_entry *pte, uint num_entries);
static void gasket_page_table_garbage_collect_nolock(
struct gasket_page_table *pg_tbl);
/* Address format declarations */
static ulong gasket_components_to_dev_address(
struct gasket_page_table *pg_tbl, int is_simple, uint page_index,
uint offset);
static int gasket_simple_page_idx(
struct gasket_page_table *pg_tbl, ulong dev_addr);
static ulong gasket_extended_lvl0_page_idx(
struct gasket_page_table *pg_tbl, ulong dev_addr);
static ulong gasket_extended_lvl1_page_idx(
struct gasket_page_table *pg_tbl, ulong dev_addr);
static int is_coherent(struct gasket_page_table *pg_tbl, ulong host_addr);
/* Public/exported functions */
/* See gasket_page_table.h for description. */
int gasket_page_table_init(
struct gasket_page_table **ppg_tbl,
...@@ -353,6 +288,85 @@ int gasket_page_table_init(
return 0;
}
/*
* Check if a range of PTEs is free.
* The page table mutex must be held by the caller.
*/
static bool gasket_is_pte_range_free(
struct gasket_page_table_entry *ptes, uint num_entries)
{
int i;
for (i = 0; i < num_entries; i++) {
if (ptes[i].status != PTE_FREE)
return false;
}
return true;
}
/*
* Free a second level page [sub]table.
* The page table mutex must be held before this call.
*/
static void gasket_free_extended_subtable(
struct gasket_page_table *pg_tbl, struct gasket_page_table_entry *pte,
u64 __iomem *slot)
{
/* Release the page table from the driver */
pte->status = PTE_FREE;
/* Release the page table from the device */
writeq(0, slot);
/* Force sync around the address release. */
mb();
if (pte->dma_addr)
dma_unmap_page(pg_tbl->device, pte->dma_addr, PAGE_SIZE,
DMA_BIDIRECTIONAL);
vfree(pte->sublevel);
if (pte->page)
free_page((ulong)page_address(pte->page));
memset(pte, 0, sizeof(struct gasket_page_table_entry));
}
/*
* Actually perform collection.
* The page table mutex must be held by the caller.
*/
static void gasket_page_table_garbage_collect_nolock(
struct gasket_page_table *pg_tbl)
{
struct gasket_page_table_entry *pte;
u64 __iomem *slot;
/* XXX FIX ME XXX -- more efficient to keep a usage count */
/* rather than scanning the second level page tables */
for (pte = pg_tbl->entries + pg_tbl->num_simple_entries,
slot = pg_tbl->base_slot + pg_tbl->num_simple_entries;
pte < pg_tbl->entries + pg_tbl->config.total_entries;
pte++, slot++) {
if (pte->status == PTE_INUSE) {
if (gasket_is_pte_range_free(
pte->sublevel, GASKET_PAGES_PER_SUBTABLE))
gasket_free_extended_subtable(
pg_tbl, pte, slot);
}
}
}
/* See gasket_page_table.h for description. */
void gasket_page_table_garbage_collect(struct gasket_page_table *pg_tbl)
{
mutex_lock(&pg_tbl->mutex);
gasket_page_table_garbage_collect_nolock(pg_tbl);
mutex_unlock(&pg_tbl->mutex);
}
/* See gasket_page_table.h for description. */
void gasket_page_table_cleanup(struct gasket_page_table *pg_tbl)
{
...@@ -404,500 +418,467 @@ int gasket_page_table_partition(
EXPORT_SYMBOL(gasket_page_table_partition);
/* /*
* See gasket_page_table.h for general description. * Return whether a host buffer was mapped as coherent memory.
*
* gasket_page_table_map calls either gasket_map_simple_pages() or
* gasket_map_extended_pages() to actually perform the mapping.
* *
* The page table mutex is held for the entire operation. * A Gasket page_table currently support one contiguous dma range, mapped to one
* contiguous virtual memory range. Check if the host_addr is within that range.
*/ */
int gasket_page_table_map( static int is_coherent(struct gasket_page_table *pg_tbl, ulong host_addr)
struct gasket_page_table *pg_tbl, ulong host_addr, ulong dev_addr,
uint num_pages)
{ {
int ret; u64 min, max;
if (!num_pages) /* whether the host address is within user virt range */
if (!pg_tbl->coherent_pages)
return 0; return 0;
mutex_lock(&pg_tbl->mutex); min = (u64)pg_tbl->coherent_pages[0].user_virt;
max = min + PAGE_SIZE * pg_tbl->num_coherent_pages;
if (gasket_addr_is_simple(pg_tbl, dev_addr)) {
ret = gasket_map_simple_pages(
pg_tbl, host_addr, dev_addr, num_pages);
} else {
ret = gasket_map_extended_pages(
pg_tbl, host_addr, dev_addr, num_pages);
}
mutex_unlock(&pg_tbl->mutex);
dev_dbg(pg_tbl->device, return min <= host_addr && host_addr < max;
"%s done: ha %llx daddr %llx num %d, ret %d\n",
__func__, (unsigned long long)host_addr,
(unsigned long long)dev_addr, num_pages, ret);
return ret;
} }
EXPORT_SYMBOL(gasket_page_table_map);
/* /*
* See gasket_page_table.h for general description. * Get and map last level page table buffers.
*
* gasket_page_table_unmap takes the page table lock and calls either
* gasket_unmap_simple_pages() or gasket_unmap_extended_pages() to
* actually unmap the pages from device space.
* *
* The page table mutex is held for the entire operation. * slots is the location(s) to write device-mapped page address. If this is a
* simple mapping, these will be address translation registers. If this is
* an extended mapping, these will be within a second-level page table
* allocated by the host and so must have their __iomem attribute casted away.
*/ */
void gasket_page_table_unmap( static int gasket_perform_mapping(
struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_pages) struct gasket_page_table *pg_tbl, struct gasket_page_table_entry *ptes,
u64 __iomem *slots, ulong host_addr, uint num_pages,
int is_simple_mapping)
{ {
if (!num_pages) int ret;
return; ulong offset;
struct page *page;
dma_addr_t dma_addr;
ulong page_addr;
int i;
mutex_lock(&pg_tbl->mutex); for (i = 0; i < num_pages; i++) {
gasket_page_table_unmap_nolock(pg_tbl, dev_addr, num_pages); page_addr = host_addr + i * PAGE_SIZE;
mutex_unlock(&pg_tbl->mutex); offset = page_addr & (PAGE_SIZE - 1);
} dev_dbg(pg_tbl->device, "%s i %d\n", __func__, i);
EXPORT_SYMBOL(gasket_page_table_unmap); if (is_coherent(pg_tbl, host_addr)) {
u64 off =
(u64)host_addr -
(u64)pg_tbl->coherent_pages[0].user_virt;
ptes[i].page = NULL;
ptes[i].offset = offset;
ptes[i].dma_addr = pg_tbl->coherent_pages[0].paddr +
off + i * PAGE_SIZE;
} else {
ret = get_user_pages_fast(
page_addr - offset, 1, 1, &page);
static void gasket_page_table_unmap_all_nolock(struct gasket_page_table *pg_tbl) if (ret <= 0) {
{ dev_err(pg_tbl->device,
gasket_unmap_simple_pages( "get user pages failed for addr=0x%lx, "
pg_tbl, gasket_components_to_dev_address(pg_tbl, 1, 0, 0), "offset=0x%lx [ret=%d]\n",
pg_tbl->num_simple_entries); page_addr, offset, ret);
gasket_unmap_extended_pages( return ret ? ret : -ENOMEM;
pg_tbl, gasket_components_to_dev_address(pg_tbl, 0, 0, 0), }
pg_tbl->num_extended_entries * GASKET_PAGES_PER_SUBTABLE); ++pg_tbl->num_active_pages;
}
/* See gasket_page_table.h for description. */ ptes[i].page = page;
void gasket_page_table_unmap_all(struct gasket_page_table *pg_tbl) ptes[i].offset = offset;
{
mutex_lock(&pg_tbl->mutex);
gasket_page_table_unmap_all_nolock(pg_tbl);
mutex_unlock(&pg_tbl->mutex);
}
EXPORT_SYMBOL(gasket_page_table_unmap_all);
/* See gasket_page_table.h for description. */ /* Map the page into DMA space. */
void gasket_page_table_reset(struct gasket_page_table *pg_tbl) ptes[i].dma_addr =
{ dma_map_page(pg_tbl->device, page, 0, PAGE_SIZE,
mutex_lock(&pg_tbl->mutex); DMA_BIDIRECTIONAL);
gasket_page_table_unmap_all_nolock(pg_tbl); dev_dbg(pg_tbl->device,
writeq(pg_tbl->config.total_entries, pg_tbl->extended_offset_reg); "%s i %d pte %p pfn %p -> mapped %llx\n",
mutex_unlock(&pg_tbl->mutex); __func__, i, &ptes[i],
} (void *)page_to_pfn(page),
(unsigned long long)ptes[i].dma_addr);
/* See gasket_page_table.h for description. */ if (ptes[i].dma_addr == -1) {
void gasket_page_table_garbage_collect(struct gasket_page_table *pg_tbl) dev_dbg(pg_tbl->device,
{ "%s i %d -> fail to map page %llx "
mutex_lock(&pg_tbl->mutex); "[pfn %p ohys %p]\n",
gasket_page_table_garbage_collect_nolock(pg_tbl); __func__, i,
mutex_unlock(&pg_tbl->mutex); (unsigned long long)ptes[i].dma_addr,
} (void *)page_to_pfn(page),
(void *)page_to_phys(page));
return -1;
}
/* Wait until the page is mapped. */
mb();
}
/* See gasket_page_table.h for description. */ /* Make the DMA-space address available to the device. */
int gasket_page_table_lookup_page( dma_addr = (ptes[i].dma_addr + offset) | GASKET_VALID_SLOT_FLAG;
struct gasket_page_table *pg_tbl, ulong dev_addr, struct page **ppage,
ulong *poffset)
{
uint page_num;
struct gasket_page_table_entry *pte;
mutex_lock(&pg_tbl->mutex);
if (gasket_addr_is_simple(pg_tbl, dev_addr)) {
page_num = gasket_simple_page_idx(pg_tbl, dev_addr);
if (page_num >= pg_tbl->num_simple_entries)
goto fail;
pte = pg_tbl->entries + page_num;
if (pte->status != PTE_INUSE)
goto fail;
} else {
/* Find the level 0 entry, */
page_num = gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
if (page_num >= pg_tbl->num_extended_entries)
goto fail;
pte = pg_tbl->entries + pg_tbl->num_simple_entries + page_num;
if (pte->status != PTE_INUSE)
goto fail;
/* and its contained level 1 entry. */ if (is_simple_mapping) {
page_num = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr); writeq(dma_addr, &slots[i]);
pte = pte->sublevel + page_num; } else {
if (pte->status != PTE_INUSE) ((u64 __force *)slots)[i] = dma_addr;
goto fail; /* Extended page table vectors are in DRAM,
* and so need to be synced each time they are updated.
*/
dma_map_single(pg_tbl->device,
(void *)&((u64 __force *)slots)[i],
sizeof(u64), DMA_TO_DEVICE);
}
ptes[i].status = PTE_INUSE;
} }
*ppage = pte->page;
*poffset = pte->offset;
mutex_unlock(&pg_tbl->mutex);
return 0; return 0;
fail:
*ppage = NULL;
*poffset = 0;
mutex_unlock(&pg_tbl->mutex);
return -1;
}
/* See gasket_page_table.h for description. */
bool gasket_page_table_are_addrs_bad(
struct gasket_page_table *pg_tbl, ulong host_addr, ulong dev_addr,
ulong bytes)
{
if (host_addr & (PAGE_SIZE - 1)) {
dev_err(pg_tbl->device,
"host mapping address 0x%lx must be page aligned\n",
host_addr);
return true;
}
return gasket_page_table_is_dev_addr_bad(pg_tbl, dev_addr, bytes);
} }
EXPORT_SYMBOL(gasket_page_table_are_addrs_bad);
/* See gasket_page_table.h for description. */ /*
bool gasket_page_table_is_dev_addr_bad( * Return the index of the page for the address in the simple table.
struct gasket_page_table *pg_tbl, ulong dev_addr, ulong bytes) * Does not perform validity checking.
*/
static int gasket_simple_page_idx(
struct gasket_page_table *pg_tbl, ulong dev_addr)
{ {
uint num_pages = bytes / PAGE_SIZE; return (dev_addr >> GASKET_SIMPLE_PAGE_SHIFT) &
(pg_tbl->config.total_entries - 1);
if (bytes & (PAGE_SIZE - 1)) {
dev_err(pg_tbl->device,
"mapping size 0x%lX must be page aligned\n", bytes);
return true;
}
if (num_pages == 0) {
dev_err(pg_tbl->device,
"requested mapping is less than one page: %lu / %lu\n",
bytes, PAGE_SIZE);
return true;
}
if (gasket_addr_is_simple(pg_tbl, dev_addr))
return gasket_is_simple_dev_addr_bad(
pg_tbl, dev_addr, num_pages);
return gasket_is_extended_dev_addr_bad(pg_tbl, dev_addr, num_pages);
} }
EXPORT_SYMBOL(gasket_page_table_is_dev_addr_bad);
/* See gasket_page_table.h for description. */ /*
uint gasket_page_table_max_size(struct gasket_page_table *page_table) * Return the level 0 page index for the given address.
* Does not perform validity checking.
*/
static ulong gasket_extended_lvl0_page_idx(
struct gasket_page_table *pg_tbl, ulong dev_addr)
{ {
if (!page_table) return (dev_addr >> GASKET_EXTENDED_LVL0_SHIFT) &
return 0; ((1 << GASKET_EXTENDED_LVL0_WIDTH) - 1);
return page_table->config.total_entries;
} }
EXPORT_SYMBOL(gasket_page_table_max_size);
/* See gasket_page_table.h for description. */ /*
uint gasket_page_table_num_entries(struct gasket_page_table *pg_tbl) * Return the level 1 page index for the given address.
* Does not perform validity checking.
*/
static ulong gasket_extended_lvl1_page_idx(
struct gasket_page_table *pg_tbl, ulong dev_addr)
{ {
if (!pg_tbl) return (dev_addr >> GASKET_EXTENDED_LVL1_SHIFT) &
return 0; (GASKET_PAGES_PER_SUBTABLE - 1);
return pg_tbl->num_simple_entries + pg_tbl->num_extended_entries;
} }
EXPORT_SYMBOL(gasket_page_table_num_entries);
/* See gasket_page_table.h for description. */ /*
uint gasket_page_table_num_simple_entries(struct gasket_page_table *pg_tbl) * Allocate page table entries in a simple table.
* The page table mutex must be held by the caller.
*/
static int gasket_alloc_simple_entries(
struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_pages)
{ {
if (!pg_tbl) if (!gasket_is_pte_range_free(
return 0; pg_tbl->entries + gasket_simple_page_idx(pg_tbl, dev_addr),
return pg_tbl->num_simple_entries; num_pages))
} return -EBUSY;
EXPORT_SYMBOL(gasket_page_table_num_simple_entries);
/* See gasket_page_table.h for description. */ return 0;
uint gasket_page_table_num_active_pages(struct gasket_page_table *pg_tbl)
{
if (!pg_tbl)
return 0;
return pg_tbl->num_active_pages;
} }
EXPORT_SYMBOL(gasket_page_table_num_active_pages);
/* See gasket_page_table.h */ /* Safely return a page to the OS. */
int gasket_page_table_system_status(struct gasket_page_table *page_table) static bool gasket_release_page(struct page *page)
{ {
if (!page_table) if (!page)
return GASKET_STATUS_LAMED; return false;
if (gasket_page_table_num_entries(page_table) == 0) { if (!PageReserved(page))
dev_dbg(page_table->device, "Page table size is 0\n"); SetPageDirty(page);
return GASKET_STATUS_LAMED; put_page(page);
}
return GASKET_STATUS_ALIVE; return true;
} }
/* /*
* Allocate and map pages to simple addresses. * Unmap and release mapped pages.
* If there is an error, no pages are mapped. * The page table mutex must be held by the caller.
*/ */
static int gasket_map_simple_pages( static void gasket_perform_unmapping(
struct gasket_page_table *pg_tbl, ulong host_addr, ulong dev_addr, struct gasket_page_table *pg_tbl, struct gasket_page_table_entry *ptes,
uint num_pages) u64 __iomem *slots, uint num_pages, int is_simple_mapping)
{ {
int ret; int i;
uint slot_idx = gasket_simple_page_idx(pg_tbl, dev_addr); /*
* For each page table entry and corresponding entry in the device's
* address translation table:
*/
for (i = 0; i < num_pages; i++) {
/* release the address from the device, */
if (is_simple_mapping || ptes[i].status == PTE_INUSE)
writeq(0, &slots[i]);
else
((u64 __force *)slots)[i] = 0;
/* Force sync around the address release. */
mb();
ret = gasket_alloc_simple_entries(pg_tbl, dev_addr, num_pages); /* release the address from the driver, */
if (ret) { if (ptes[i].status == PTE_INUSE) {
dev_err(pg_tbl->device, if (ptes[i].dma_addr) {
"page table slots %u (@ 0x%lx) to %u are not available\n", dma_unmap_page(pg_tbl->device, ptes[i].dma_addr,
slot_idx, dev_addr, slot_idx + num_pages - 1); PAGE_SIZE, DMA_FROM_DEVICE);
return ret; }
if (gasket_release_page(ptes[i].page))
--pg_tbl->num_active_pages;
}
ptes[i].status = PTE_FREE;
/* and clear the PTE. */
memset(&ptes[i], 0, sizeof(struct gasket_page_table_entry));
} }
}
ret = gasket_perform_mapping( /*
pg_tbl, pg_tbl->entries + slot_idx, * Unmap and release pages mapped to simple addresses.
pg_tbl->base_slot + slot_idx, host_addr, num_pages, 1); * The page table mutex must be held by the caller.
*/
static void gasket_unmap_simple_pages(
struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_pages)
{
uint slot = gasket_simple_page_idx(pg_tbl, dev_addr);
if (ret) { gasket_perform_unmapping(pg_tbl, pg_tbl->entries + slot,
gasket_page_table_unmap_nolock(pg_tbl, dev_addr, num_pages); pg_tbl->base_slot + slot, num_pages, 1);
dev_err(pg_tbl->device, "gasket_perform_mapping %d\n", ret);
}
return ret;
} }
/* /*
* gasket_map_extended_pages - Get and map buffers to extended addresses. * Unmap and release buffers to extended addresses.
* If there is an error, no pages are mapped. * The page table mutex must be held by the caller.
*/ */
static int gasket_map_extended_pages( static void gasket_unmap_extended_pages(
struct gasket_page_table *pg_tbl, ulong host_addr, ulong dev_addr, struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_pages)
uint num_pages)
{ {
int ret;
ulong dev_addr_end;
uint slot_idx, remain, len; uint slot_idx, remain, len;
struct gasket_page_table_entry *pte; struct gasket_page_table_entry *pte;
u64 __iomem *slot_base; u64 __iomem *slot_base;
ret = gasket_alloc_extended_entries(pg_tbl, dev_addr, num_pages);
if (ret) {
dev_addr_end = dev_addr + (num_pages / PAGE_SIZE) - 1;
dev_err(pg_tbl->device,
"page table slots (%lu,%lu) (@ 0x%lx) to (%lu,%lu) are "
"not available\n",
gasket_extended_lvl0_page_idx(pg_tbl, dev_addr),
dev_addr,
gasket_extended_lvl1_page_idx(pg_tbl, dev_addr),
gasket_extended_lvl0_page_idx(pg_tbl, dev_addr_end),
gasket_extended_lvl1_page_idx(pg_tbl, dev_addr_end));
return ret;
}
remain = num_pages; remain = num_pages;
slot_idx = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr); slot_idx = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
pte = pg_tbl->entries + pg_tbl->num_simple_entries + pte = pg_tbl->entries + pg_tbl->num_simple_entries +
gasket_extended_lvl0_page_idx(pg_tbl, dev_addr); gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
while (remain > 0) { while (remain > 0) {
/* TODO: Add check to ensure pte remains valid? */
len = min(remain, GASKET_PAGES_PER_SUBTABLE - slot_idx); len = min(remain, GASKET_PAGES_PER_SUBTABLE - slot_idx);
slot_base = if (pte->status == PTE_INUSE) {
(u64 __iomem *)(page_address(pte->page) + pte->offset); slot_base = (u64 __iomem *)(page_address(pte->page) +
ret = gasket_perform_mapping( pte->offset);
pg_tbl, pte->sublevel + slot_idx, slot_base + slot_idx, gasket_perform_unmapping(
host_addr, len, 0); pg_tbl, pte->sublevel + slot_idx,
if (ret) { slot_base + slot_idx, len, 0);
gasket_page_table_unmap_nolock(
pg_tbl, dev_addr, num_pages);
return ret;
} }
remain -= len; remain -= len;
slot_idx = 0; slot_idx = 0;
pte++; pte++;
host_addr += len * PAGE_SIZE;
} }
return 0;
} }
/* /* Evaluates to nonzero if the specified virtual address is simple. */
* Get and map last level page table buffers. static inline bool gasket_addr_is_simple(
* struct gasket_page_table *pg_tbl, ulong addr)
* slots is the location(s) to write device-mapped page address. If this is a
* simple mapping, these will be address translation registers. If this is
* an extended mapping, these will be within a second-level page table
* allocated by the host and so must have their __iomem attribute casted away.
*/
static int gasket_perform_mapping(
struct gasket_page_table *pg_tbl, struct gasket_page_table_entry *ptes,
u64 __iomem *slots, ulong host_addr, uint num_pages,
int is_simple_mapping)
{ {
int ret; return !((addr) & (pg_tbl)->extended_flag);
ulong offset;
struct page *page;
dma_addr_t dma_addr;
ulong page_addr;
int i;
for (i = 0; i < num_pages; i++) {
page_addr = host_addr + i * PAGE_SIZE;
offset = page_addr & (PAGE_SIZE - 1);
dev_dbg(pg_tbl->device, "%s i %d\n", __func__, i);
if (is_coherent(pg_tbl, host_addr)) {
u64 off =
(u64)host_addr -
(u64)pg_tbl->coherent_pages[0].user_virt;
ptes[i].page = NULL;
ptes[i].offset = offset;
ptes[i].dma_addr = pg_tbl->coherent_pages[0].paddr +
off + i * PAGE_SIZE;
} else {
ret = get_user_pages_fast(
page_addr - offset, 1, 1, &page);
if (ret <= 0) {
dev_err(pg_tbl->device,
"get user pages failed for addr=0x%lx, "
"offset=0x%lx [ret=%d]\n",
page_addr, offset, ret);
return ret ? ret : -ENOMEM;
}
++pg_tbl->num_active_pages;
ptes[i].page = page;
ptes[i].offset = offset;
/* Map the page into DMA space. */
ptes[i].dma_addr =
dma_map_page(pg_tbl->device, page, 0, PAGE_SIZE,
DMA_BIDIRECTIONAL);
dev_dbg(pg_tbl->device,
"%s i %d pte %p pfn %p -> mapped %llx\n",
__func__, i, &ptes[i],
(void *)page_to_pfn(page),
(unsigned long long)ptes[i].dma_addr);
if (ptes[i].dma_addr == -1) {
dev_dbg(pg_tbl->device,
"%s i %d -> fail to map page %llx "
"[pfn %p ohys %p]\n",
__func__, i,
(unsigned long long)ptes[i].dma_addr,
(void *)page_to_pfn(page),
(void *)page_to_phys(page));
return -1;
}
/* Wait until the page is mapped. */
mb();
}
/* Make the DMA-space address available to the device. */
dma_addr = (ptes[i].dma_addr + offset) | GASKET_VALID_SLOT_FLAG;
if (is_simple_mapping) {
writeq(dma_addr, &slots[i]);
} else {
((u64 __force *)slots)[i] = dma_addr;
/* Extended page table vectors are in DRAM,
* and so need to be synced each time they are updated.
*/
dma_map_single(pg_tbl->device,
(void *)&((u64 __force *)slots)[i],
sizeof(u64), DMA_TO_DEVICE);
}
ptes[i].status = PTE_INUSE;
}
return 0;
} }
/* /*
* Allocate page table entries in a simple table. * Convert (simple, page, offset) into a device address.
* The page table mutex must be held by the caller. * Examples:
* Simple page 0, offset 32:
* Input (0, 0, 32), Output 0x20
* Simple page 1000, offset 511:
* Input (0, 1000, 512), Output 0x3E81FF
* Extended page 0, offset 32:
* Input (0, 0, 32), Output 0x8000000020
* Extended page 1000, offset 511:
* Input (1, 1000, 512), Output 0x8003E81FF
*/ */
static int gasket_alloc_simple_entries( static ulong gasket_components_to_dev_address(
struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_pages) struct gasket_page_table *pg_tbl, int is_simple, uint page_index,
uint offset)
{ {
if (!gasket_is_pte_range_free( ulong lvl0_index, lvl1_index;
pg_tbl->entries + gasket_simple_page_idx(pg_tbl, dev_addr),
num_pages))
return -EBUSY;
return 0; if (is_simple) {
/* Return simple addresses directly. */
lvl0_index = page_index & (pg_tbl->config.total_entries - 1);
return (lvl0_index << GASKET_SIMPLE_PAGE_SHIFT) | offset;
}
/*
* This could be compressed into fewer statements, but
* A) the compiler should optimize it
* B) this is not slow
* C) this is an uncommon operation
* D) this is actually readable this way.
*/
lvl0_index = page_index / GASKET_PAGES_PER_SUBTABLE;
lvl1_index = page_index & (GASKET_PAGES_PER_SUBTABLE - 1);
return (pg_tbl)->extended_flag |
(lvl0_index << GASKET_EXTENDED_LVL0_SHIFT) |
(lvl1_index << GASKET_EXTENDED_LVL1_SHIFT) | offset;
} }
/* /*
* Allocate slots in an extended page table. Check to see if a range of page * Validity checking for simple addresses.
* table slots are available. If necessary, memory is allocated for second level
* page tables.
*
* Note that memory for second level page tables is allocated as needed, but
* that memory is only freed on the final close of the device file, when the
* page tables are repartitioned, or the the device is removed. If there is an
* error or if the full range of slots is not available, any memory
* allocated for second level page tables remains allocated until final close,
* repartition, or device removal.
* *
* The page table mutex must be held by the caller. * Verify that address translation commutes (from address to/from page + offset)
* and that the requested page range starts and ends within the set of
* currently-partitioned simple pages.
*/ */
static int gasket_alloc_extended_entries( static bool gasket_is_simple_dev_addr_bad(
struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_entries) struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_pages)
{ {
int ret = 0; ulong page_offset = dev_addr & (PAGE_SIZE - 1);
uint remain, subtable_slot_idx, len; ulong page_index =
struct gasket_page_table_entry *pte; (dev_addr / PAGE_SIZE) & (pg_tbl->config.total_entries - 1);
u64 __iomem *slot;
remain = num_entries;
subtable_slot_idx = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
pte = pg_tbl->entries + pg_tbl->num_simple_entries +
gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
slot = pg_tbl->base_slot + pg_tbl->num_simple_entries +
gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
while (remain > 0) { if (gasket_components_to_dev_address(
len = min(remain, pg_tbl, 1, page_index, page_offset) != dev_addr) {
GASKET_PAGES_PER_SUBTABLE - subtable_slot_idx); dev_err(pg_tbl->device, "address is invalid, 0x%lX\n",
dev_addr);
return true;
}
if (pte->status == PTE_FREE) { if (page_index >= pg_tbl->num_simple_entries) {
ret = gasket_alloc_extended_subtable(pg_tbl, pte, slot); dev_err(pg_tbl->device,
if (ret) { "starting slot at %lu is too large, max is < %u\n",
dev_err(pg_tbl->device, page_index, pg_tbl->num_simple_entries);
"no memory for extended addr subtable\n"); return true;
return ret; }
}
} else {
if (!gasket_is_pte_range_free(
pte->sublevel + subtable_slot_idx, len))
return -EBUSY;
}
remain -= len; if (page_index + num_pages > pg_tbl->num_simple_entries) {
subtable_slot_idx = 0; dev_err(pg_tbl->device,
pte++; "ending slot at %lu is too large, max is <= %u\n",
slot++; page_index + num_pages, pg_tbl->num_simple_entries);
return true;
} }
return 0; return false;
} }
/* /*
* Allocate a second level page table. * Validity checking for extended addresses.
* The page table mutex must be held by the caller. *
* Verify that address translation commutes (from address to/from page +
* offset) and that the requested page range starts and ends within the set of
* currently-partitioned extended pages.
*/ */
static int gasket_alloc_extended_subtable( static bool gasket_is_extended_dev_addr_bad(
struct gasket_page_table *pg_tbl, struct gasket_page_table_entry *pte, struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_pages)
u64 __iomem *slot)
{ {
ulong page_addr, subtable_bytes; /* Starting byte index of dev_addr into the first mapped page */
dma_addr_t dma_addr; ulong page_offset = dev_addr & (PAGE_SIZE - 1);
ulong page_global_idx, page_lvl0_idx;
ulong num_lvl0_pages;
ulong addr;
/* XXX FIX ME XXX this is inefficient for non-4K page sizes */ /* check if the device address is out of bound */
addr = dev_addr & ~((pg_tbl)->extended_flag);
if (addr >> (GASKET_EXTENDED_LVL0_WIDTH + GASKET_EXTENDED_LVL0_SHIFT)) {
dev_err(pg_tbl->device, "device address out of bounds: 0x%lx\n",
dev_addr);
return true;
}
/* GFP_DMA flag must be passed to architectures for which /* Find the starting sub-page index in the space of all sub-pages. */
* part of the memory range is not considered DMA'able. page_global_idx = (dev_addr / PAGE_SIZE) &
* This seems to be the case for Juno board with 4.5.0 Linaro kernel (pg_tbl->config.total_entries * GASKET_PAGES_PER_SUBTABLE - 1);
/* Find the starting level 0 index. */
page_lvl0_idx = gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
/* Get the count of affected level 0 pages. */
num_lvl0_pages = (num_pages + GASKET_PAGES_PER_SUBTABLE - 1) /
GASKET_PAGES_PER_SUBTABLE;
if (gasket_components_to_dev_address(
pg_tbl, 0, page_global_idx, page_offset) != dev_addr) {
dev_err(pg_tbl->device, "address is invalid: 0x%lx\n",
dev_addr);
return true;
}
if (page_lvl0_idx >= pg_tbl->num_extended_entries) {
dev_err(pg_tbl->device,
"starting level 0 slot at %lu is too large, max is < "
"%u\n", page_lvl0_idx, pg_tbl->num_extended_entries);
return true;
}
if (page_lvl0_idx + num_lvl0_pages > pg_tbl->num_extended_entries) {
dev_err(pg_tbl->device,
"ending level 0 slot at %lu is too large, max is <= %u\n",
page_lvl0_idx + num_lvl0_pages,
pg_tbl->num_extended_entries);
return true;
}
return false;
}
/*
* Non-locking entry to unmapping routines.
* The page table mutex must be held by the caller.
*/
static void gasket_page_table_unmap_nolock(
struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_pages)
{
if (!num_pages)
return;
if (gasket_addr_is_simple(pg_tbl, dev_addr))
gasket_unmap_simple_pages(pg_tbl, dev_addr, num_pages);
else
gasket_unmap_extended_pages(pg_tbl, dev_addr, num_pages);
}
/*
* Allocate and map pages to simple addresses.
* If there is an error, no pages are mapped.
*/
static int gasket_map_simple_pages(
struct gasket_page_table *pg_tbl, ulong host_addr, ulong dev_addr,
uint num_pages)
{
int ret;
uint slot_idx = gasket_simple_page_idx(pg_tbl, dev_addr);
ret = gasket_alloc_simple_entries(pg_tbl, dev_addr, num_pages);
if (ret) {
dev_err(pg_tbl->device,
"page table slots %u (@ 0x%lx) to %u are not available\n",
slot_idx, dev_addr, slot_idx + num_pages - 1);
return ret;
}
ret = gasket_perform_mapping(
pg_tbl, pg_tbl->entries + slot_idx,
pg_tbl->base_slot + slot_idx, host_addr, num_pages, 1);
if (ret) {
gasket_page_table_unmap_nolock(pg_tbl, dev_addr, num_pages);
dev_err(pg_tbl->device, "gasket_perform_mapping %d\n", ret);
}
return ret;
}
/*
* Allocate a second level page table.
* The page table mutex must be held by the caller.
*/
static int gasket_alloc_extended_subtable(
struct gasket_page_table *pg_tbl, struct gasket_page_table_entry *pte,
u64 __iomem *slot)
{
ulong page_addr, subtable_bytes;
dma_addr_t dma_addr;
/* XXX FIX ME XXX this is inefficient for non-4K page sizes */
/* GFP_DMA flag must be passed to architectures for which
* part of the memory range is not considered DMA'able.
* This seems to be the case for Juno board with 4.5.0 Linaro kernel
*/ */
page_addr = get_zeroed_page(GFP_KERNEL | GFP_DMA); page_addr = get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!page_addr) if (!page_addr)
...@@ -930,384 +911,338 @@ static int gasket_alloc_extended_subtable(
}
/* /*
* Non-locking entry to unmapping routines. * Allocate slots in an extended page table. Check to see if a range of page
* table slots are available. If necessary, memory is allocated for second level
* page tables.
*
* Note that memory for second level page tables is allocated as needed, but
* that memory is only freed on the final close of the device file, when the
* page tables are repartitioned, or the the device is removed. If there is an
* error or if the full range of slots is not available, any memory
* allocated for second level page tables remains allocated until final close,
* repartition, or device removal.
*
* The page table mutex must be held by the caller. * The page table mutex must be held by the caller.
*/ */
static void gasket_page_table_unmap_nolock( static int gasket_alloc_extended_entries(
struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_pages) struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_entries)
{ {
if (!num_pages) int ret = 0;
return; uint remain, subtable_slot_idx, len;
struct gasket_page_table_entry *pte;
u64 __iomem *slot;
if (gasket_addr_is_simple(pg_tbl, dev_addr)) remain = num_entries;
gasket_unmap_simple_pages(pg_tbl, dev_addr, num_pages); subtable_slot_idx = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
else pte = pg_tbl->entries + pg_tbl->num_simple_entries +
gasket_unmap_extended_pages(pg_tbl, dev_addr, num_pages); gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
} slot = pg_tbl->base_slot + pg_tbl->num_simple_entries +
gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
/* while (remain > 0) {
* Unmap and release pages mapped to simple addresses. len = min(remain,
* The page table mutex must be held by the caller. GASKET_PAGES_PER_SUBTABLE - subtable_slot_idx);
*/
static void gasket_unmap_simple_pages(
struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_pages)
{
uint slot = gasket_simple_page_idx(pg_tbl, dev_addr);
gasket_perform_unmapping(pg_tbl, pg_tbl->entries + slot, if (pte->status == PTE_FREE) {
pg_tbl->base_slot + slot, num_pages, 1); ret = gasket_alloc_extended_subtable(pg_tbl, pte, slot);
if (ret) {
dev_err(pg_tbl->device,
"no memory for extended addr subtable\n");
return ret;
}
} else {
if (!gasket_is_pte_range_free(
pte->sublevel + subtable_slot_idx, len))
return -EBUSY;
}
remain -= len;
subtable_slot_idx = 0;
pte++;
slot++;
}
return 0;
} }
/* /*
* Unmap and release buffers to extended addresses. * gasket_map_extended_pages - Get and map buffers to extended addresses.
* The page table mutex must be held by the caller. * If there is an error, no pages are mapped.
*/ */
static void gasket_unmap_extended_pages( static int gasket_map_extended_pages(
struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_pages) struct gasket_page_table *pg_tbl, ulong host_addr, ulong dev_addr,
uint num_pages)
{ {
int ret;
ulong dev_addr_end;
uint slot_idx, remain, len; uint slot_idx, remain, len;
struct gasket_page_table_entry *pte; struct gasket_page_table_entry *pte;
u64 __iomem *slot_base; u64 __iomem *slot_base;
ret = gasket_alloc_extended_entries(pg_tbl, dev_addr, num_pages);
if (ret) {
dev_addr_end = dev_addr + (num_pages / PAGE_SIZE) - 1;
dev_err(pg_tbl->device,
"page table slots (%lu,%lu) (@ 0x%lx) to (%lu,%lu) are "
"not available\n",
gasket_extended_lvl0_page_idx(pg_tbl, dev_addr),
dev_addr,
gasket_extended_lvl1_page_idx(pg_tbl, dev_addr),
gasket_extended_lvl0_page_idx(pg_tbl, dev_addr_end),
gasket_extended_lvl1_page_idx(pg_tbl, dev_addr_end));
return ret;
}
remain = num_pages; remain = num_pages;
slot_idx = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr); slot_idx = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
pte = pg_tbl->entries + pg_tbl->num_simple_entries + pte = pg_tbl->entries + pg_tbl->num_simple_entries +
gasket_extended_lvl0_page_idx(pg_tbl, dev_addr); gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
while (remain > 0) { while (remain > 0) {
/* TODO: Add check to ensure pte remains valid? */
len = min(remain, GASKET_PAGES_PER_SUBTABLE - slot_idx); len = min(remain, GASKET_PAGES_PER_SUBTABLE - slot_idx);
if (pte->status == PTE_INUSE) { slot_base =
slot_base = (u64 __iomem *)(page_address(pte->page) + (u64 __iomem *)(page_address(pte->page) + pte->offset);
pte->offset); ret = gasket_perform_mapping(
gasket_perform_unmapping( pg_tbl, pte->sublevel + slot_idx, slot_base + slot_idx,
pg_tbl, pte->sublevel + slot_idx, host_addr, len, 0);
slot_base + slot_idx, len, 0); if (ret) {
gasket_page_table_unmap_nolock(
pg_tbl, dev_addr, num_pages);
return ret;
} }
remain -= len; remain -= len;
slot_idx = 0; slot_idx = 0;
pte++; pte++;
host_addr += len * PAGE_SIZE;
} }
return 0;
} }
/* /*
* Unmap and release mapped pages. * See gasket_page_table.h for general description.
* The page table mutex must be held by the caller. *
* gasket_page_table_map calls either gasket_map_simple_pages() or
* gasket_map_extended_pages() to actually perform the mapping.
*
* The page table mutex is held for the entire operation.
*/ */
static void gasket_perform_unmapping( int gasket_page_table_map(
struct gasket_page_table *pg_tbl, struct gasket_page_table_entry *ptes, struct gasket_page_table *pg_tbl, ulong host_addr, ulong dev_addr,
u64 __iomem *slots, uint num_pages, int is_simple_mapping) uint num_pages)
{ {
int i; int ret;
/*
* For each page table entry and corresponding entry in the device's
* address translation table:
*/
for (i = 0; i < num_pages; i++) {
/* release the address from the device, */
if (is_simple_mapping || ptes[i].status == PTE_INUSE)
writeq(0, &slots[i]);
else
((u64 __force *)slots)[i] = 0;
/* Force sync around the address release. */
mb();
/* release the address from the driver, */ if (!num_pages)
if (ptes[i].status == PTE_INUSE) { return 0;
if (ptes[i].dma_addr) {
dma_unmap_page(pg_tbl->device, ptes[i].dma_addr,
PAGE_SIZE, DMA_FROM_DEVICE);
}
if (gasket_release_page(ptes[i].page))
--pg_tbl->num_active_pages;
}
ptes[i].status = PTE_FREE;
/* and clear the PTE. */ mutex_lock(&pg_tbl->mutex);
memset(&ptes[i], 0, sizeof(struct gasket_page_table_entry));
}
}
/* if (gasket_addr_is_simple(pg_tbl, dev_addr)) {
* Free a second level page [sub]table. ret = gasket_map_simple_pages(
* The page table mutex must be held before this call. pg_tbl, host_addr, dev_addr, num_pages);
*/ } else {
static void gasket_free_extended_subtable( ret = gasket_map_extended_pages(
struct gasket_page_table *pg_tbl, struct gasket_page_table_entry *pte, pg_tbl, host_addr, dev_addr, num_pages);
u64 __iomem *slot) }
{
/* Release the page table from the driver */
pte->status = PTE_FREE;
/* Release the page table from the device */
writeq(0, slot);
/* Force sync around the address release. */
mb();
if (pte->dma_addr) mutex_unlock(&pg_tbl->mutex);
dma_unmap_page(pg_tbl->device, pte->dma_addr, PAGE_SIZE,
DMA_BIDIRECTIONAL);
vfree(pte->sublevel); dev_dbg(pg_tbl->device,
"%s done: ha %llx daddr %llx num %d, ret %d\n",
__func__, (unsigned long long)host_addr,
(unsigned long long)dev_addr, num_pages, ret);
return ret;
}
EXPORT_SYMBOL(gasket_page_table_map);
if (pte->page) /*
free_page((ulong)page_address(pte->page)); * See gasket_page_table.h for general description.
*
* gasket_page_table_unmap takes the page table lock and calls either
* gasket_unmap_simple_pages() or gasket_unmap_extended_pages() to
* actually unmap the pages from device space.
*
* The page table mutex is held for the entire operation.
*/
void gasket_page_table_unmap(
struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_pages)
{
if (!num_pages)
return;
memset(pte, 0, sizeof(struct gasket_page_table_entry)); mutex_lock(&pg_tbl->mutex);
gasket_page_table_unmap_nolock(pg_tbl, dev_addr, num_pages);
mutex_unlock(&pg_tbl->mutex);
} }
EXPORT_SYMBOL(gasket_page_table_unmap);
/* Safely return a page to the OS. */ static void gasket_page_table_unmap_all_nolock(struct gasket_page_table *pg_tbl)
static bool gasket_release_page(struct page *page)
{ {
if (!page) gasket_unmap_simple_pages(
return false; pg_tbl, gasket_components_to_dev_address(pg_tbl, 1, 0, 0),
pg_tbl->num_simple_entries);
if (!PageReserved(page)) gasket_unmap_extended_pages(
SetPageDirty(page); pg_tbl, gasket_components_to_dev_address(pg_tbl, 0, 0, 0),
put_page(page); pg_tbl->num_extended_entries * GASKET_PAGES_PER_SUBTABLE);
}
return true; /* See gasket_page_table.h for description. */
void gasket_page_table_unmap_all(struct gasket_page_table *pg_tbl)
{
mutex_lock(&pg_tbl->mutex);
gasket_page_table_unmap_all_nolock(pg_tbl);
mutex_unlock(&pg_tbl->mutex);
} }
EXPORT_SYMBOL(gasket_page_table_unmap_all);
/* Evaluates to nonzero if the specified virtual address is simple. */ /* See gasket_page_table.h for description. */
static inline bool gasket_addr_is_simple( void gasket_page_table_reset(struct gasket_page_table *pg_tbl)
struct gasket_page_table *pg_tbl, ulong addr)
{ {
return !((addr) & (pg_tbl)->extended_flag); mutex_lock(&pg_tbl->mutex);
gasket_page_table_unmap_all_nolock(pg_tbl);
writeq(pg_tbl->config.total_entries, pg_tbl->extended_offset_reg);
mutex_unlock(&pg_tbl->mutex);
} }
/* /* See gasket_page_table.h for description. */
* Validity checking for simple addresses. int gasket_page_table_lookup_page(
* struct gasket_page_table *pg_tbl, ulong dev_addr, struct page **ppage,
* Verify that address translation commutes (from address to/from page + offset) ulong *poffset)
* and that the requested page range starts and ends within the set of
* currently-partitioned simple pages.
*/
static bool gasket_is_simple_dev_addr_bad(
struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_pages)
{ {
ulong page_offset = dev_addr & (PAGE_SIZE - 1); uint page_num;
ulong page_index = struct gasket_page_table_entry *pte;
(dev_addr / PAGE_SIZE) & (pg_tbl->config.total_entries - 1);
if (gasket_components_to_dev_address( mutex_lock(&pg_tbl->mutex);
pg_tbl, 1, page_index, page_offset) != dev_addr) { if (gasket_addr_is_simple(pg_tbl, dev_addr)) {
dev_err(pg_tbl->device, "address is invalid, 0x%lX\n", page_num = gasket_simple_page_idx(pg_tbl, dev_addr);
dev_addr); if (page_num >= pg_tbl->num_simple_entries)
return true; goto fail;
}
if (page_index >= pg_tbl->num_simple_entries) { pte = pg_tbl->entries + page_num;
dev_err(pg_tbl->device, if (pte->status != PTE_INUSE)
"starting slot at %lu is too large, max is < %u\n", goto fail;
page_index, pg_tbl->num_simple_entries); } else {
return true; /* Find the level 0 entry, */
} page_num = gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
if (page_num >= pg_tbl->num_extended_entries)
goto fail;
if (page_index + num_pages > pg_tbl->num_simple_entries) { pte = pg_tbl->entries + pg_tbl->num_simple_entries + page_num;
dev_err(pg_tbl->device, if (pte->status != PTE_INUSE)
"ending slot at %lu is too large, max is <= %u\n", goto fail;
page_index + num_pages, pg_tbl->num_simple_entries);
return true; /* and its contained level 1 entry. */
page_num = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
pte = pte->sublevel + page_num;
if (pte->status != PTE_INUSE)
goto fail;
} }
return false; *ppage = pte->page;
*poffset = pte->offset;
mutex_unlock(&pg_tbl->mutex);
return 0;
fail:
*ppage = NULL;
*poffset = 0;
mutex_unlock(&pg_tbl->mutex);
return -1;
} }
/* /* See gasket_page_table.h for description. */
* Validity checking for extended addresses. bool gasket_page_table_are_addrs_bad(
* struct gasket_page_table *pg_tbl, ulong host_addr, ulong dev_addr,
* Verify that address translation commutes (from address to/from page + ulong bytes)
* offset) and that the requested page range starts and ends within the set of
* currently-partitioned extended pages.
*/
static bool gasket_is_extended_dev_addr_bad(
struct gasket_page_table *pg_tbl, ulong dev_addr, uint num_pages)
{ {
/* Starting byte index of dev_addr into the first mapped page */ if (host_addr & (PAGE_SIZE - 1)) {
ulong page_offset = dev_addr & (PAGE_SIZE - 1); dev_err(pg_tbl->device,
ulong page_global_idx, page_lvl0_idx; "host mapping address 0x%lx must be page aligned\n",
ulong num_lvl0_pages; host_addr);
ulong addr;
/* check if the device address is out of bound */
addr = dev_addr & ~((pg_tbl)->extended_flag);
if (addr >> (GASKET_EXTENDED_LVL0_WIDTH + GASKET_EXTENDED_LVL0_SHIFT)) {
dev_err(pg_tbl->device, "device address out of bounds: 0x%lx\n",
dev_addr);
return true; return true;
} }
/* Find the starting sub-page index in the space of all sub-pages. */ return gasket_page_table_is_dev_addr_bad(pg_tbl, dev_addr, bytes);
page_global_idx = (dev_addr / PAGE_SIZE) & }
(pg_tbl->config.total_entries * GASKET_PAGES_PER_SUBTABLE - 1); EXPORT_SYMBOL(gasket_page_table_are_addrs_bad);
/* Find the starting level 0 index. */
page_lvl0_idx = gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
/* Get the count of affected level 0 pages. */
num_lvl0_pages = (num_pages + GASKET_PAGES_PER_SUBTABLE - 1) /
GASKET_PAGES_PER_SUBTABLE;
if (gasket_components_to_dev_address( /* See gasket_page_table.h for description. */
pg_tbl, 0, page_global_idx, page_offset) != dev_addr) { bool gasket_page_table_is_dev_addr_bad(
dev_err(pg_tbl->device, "address is invalid: 0x%lx\n", struct gasket_page_table *pg_tbl, ulong dev_addr, ulong bytes)
dev_addr); {
return true; uint num_pages = bytes / PAGE_SIZE;
}
if (page_lvl0_idx >= pg_tbl->num_extended_entries) { if (bytes & (PAGE_SIZE - 1)) {
dev_err(pg_tbl->device, dev_err(pg_tbl->device,
"starting level 0 slot at %lu is too large, max is < " "mapping size 0x%lX must be page aligned\n", bytes);
"%u\n", page_lvl0_idx, pg_tbl->num_extended_entries);
return true; return true;
} }
if (page_lvl0_idx + num_lvl0_pages > pg_tbl->num_extended_entries) { if (num_pages == 0) {
dev_err(pg_tbl->device, dev_err(pg_tbl->device,
"ending level 0 slot at %lu is too large, max is <= %u\n", "requested mapping is less than one page: %lu / %lu\n",
page_lvl0_idx + num_lvl0_pages, bytes, PAGE_SIZE);
pg_tbl->num_extended_entries);
return true; return true;
} }
return false; if (gasket_addr_is_simple(pg_tbl, dev_addr))
} return gasket_is_simple_dev_addr_bad(
pg_tbl, dev_addr, num_pages);
/* return gasket_is_extended_dev_addr_bad(pg_tbl, dev_addr, num_pages);
* Check if a range of PTEs is free.
* The page table mutex must be held by the caller.
*/
static bool gasket_is_pte_range_free(
struct gasket_page_table_entry *ptes, uint num_entries)
{
int i;
for (i = 0; i < num_entries; i++) {
if (ptes[i].status != PTE_FREE)
return false;
}
return true;
}
/*
* Actually perform collection.
* The page table mutex must be held by the caller.
*/
static void gasket_page_table_garbage_collect_nolock(
struct gasket_page_table *pg_tbl)
{
struct gasket_page_table_entry *pte;
u64 __iomem *slot;
/* XXX FIX ME XXX -- more efficient to keep a usage count */
/* rather than scanning the second level page tables */
for (pte = pg_tbl->entries + pg_tbl->num_simple_entries,
slot = pg_tbl->base_slot + pg_tbl->num_simple_entries;
pte < pg_tbl->entries + pg_tbl->config.total_entries;
pte++, slot++) {
if (pte->status == PTE_INUSE) {
if (gasket_is_pte_range_free(
pte->sublevel, GASKET_PAGES_PER_SUBTABLE))
gasket_free_extended_subtable(
pg_tbl, pte, slot);
}
}
} }
EXPORT_SYMBOL(gasket_page_table_is_dev_addr_bad);
/* /* See gasket_page_table.h for description. */
* Convert (simple, page, offset) into a device address. uint gasket_page_table_max_size(struct gasket_page_table *page_table)
* Examples:
* Simple page 0, offset 32:
* Input (0, 0, 32), Output 0x20
* Simple page 1000, offset 511:
* Input (0, 1000, 512), Output 0x3E81FF
* Extended page 0, offset 32:
* Input (0, 0, 32), Output 0x8000000020
* Extended page 1000, offset 511:
* Input (1, 1000, 512), Output 0x8003E81FF
*/
static ulong gasket_components_to_dev_address(
struct gasket_page_table *pg_tbl, int is_simple, uint page_index,
uint offset)
{ {
ulong lvl0_index, lvl1_index; if (!page_table)
return 0;
if (is_simple) { return page_table->config.total_entries;
/* Return simple addresses directly. */
lvl0_index = page_index & (pg_tbl->config.total_entries - 1);
return (lvl0_index << GASKET_SIMPLE_PAGE_SHIFT) | offset;
}
/*
* This could be compressed into fewer statements, but
* A) the compiler should optimize it
* B) this is not slow
* C) this is an uncommon operation
* D) this is actually readable this way.
*/
lvl0_index = page_index / GASKET_PAGES_PER_SUBTABLE;
lvl1_index = page_index & (GASKET_PAGES_PER_SUBTABLE - 1);
return (pg_tbl)->extended_flag |
(lvl0_index << GASKET_EXTENDED_LVL0_SHIFT) |
(lvl1_index << GASKET_EXTENDED_LVL1_SHIFT) | offset;
} }
EXPORT_SYMBOL(gasket_page_table_max_size);
/* /* See gasket_page_table.h for description. */
* Return the index of the page for the address in the simple table. uint gasket_page_table_num_entries(struct gasket_page_table *pg_tbl)
* Does not perform validity checking.
*/
static int gasket_simple_page_idx(
struct gasket_page_table *pg_tbl, ulong dev_addr)
{ {
return (dev_addr >> GASKET_SIMPLE_PAGE_SHIFT) & if (!pg_tbl)
(pg_tbl->config.total_entries - 1); return 0;
return pg_tbl->num_simple_entries + pg_tbl->num_extended_entries;
} }
EXPORT_SYMBOL(gasket_page_table_num_entries);
/* /* See gasket_page_table.h for description. */
* Return the level 0 page index for the given address. uint gasket_page_table_num_simple_entries(struct gasket_page_table *pg_tbl)
* Does not perform validity checking.
*/
static ulong gasket_extended_lvl0_page_idx(
struct gasket_page_table *pg_tbl, ulong dev_addr)
{ {
return (dev_addr >> GASKET_EXTENDED_LVL0_SHIFT) & if (!pg_tbl)
((1 << GASKET_EXTENDED_LVL0_WIDTH) - 1); return 0;
return pg_tbl->num_simple_entries;
} }
EXPORT_SYMBOL(gasket_page_table_num_simple_entries);
/* /* See gasket_page_table.h for description. */
* Return the level 1 page index for the given address. uint gasket_page_table_num_active_pages(struct gasket_page_table *pg_tbl)
* Does not perform validity checking.
*/
static ulong gasket_extended_lvl1_page_idx(
struct gasket_page_table *pg_tbl, ulong dev_addr)
{ {
return (dev_addr >> GASKET_EXTENDED_LVL1_SHIFT) & if (!pg_tbl)
(GASKET_PAGES_PER_SUBTABLE - 1); return 0;
return pg_tbl->num_active_pages;
} }
EXPORT_SYMBOL(gasket_page_table_num_active_pages);
/* /* See gasket_page_table.h */
* Return whether a host buffer was mapped as coherent memory. int gasket_page_table_system_status(struct gasket_page_table *page_table)
*
* A Gasket page_table currently support one contiguous dma range, mapped to one
* contiguous virtual memory range. Check if the host_addr is within that range.
*/
static int is_coherent(struct gasket_page_table *pg_tbl, ulong host_addr)
{ {
u64 min, max; if (!page_table)
return GASKET_STATUS_LAMED;
/* whether the host address is within user virt range */
if (!pg_tbl->coherent_pages)
return 0;
min = (u64)pg_tbl->coherent_pages[0].user_virt; if (gasket_page_table_num_entries(page_table) == 0) {
max = min + PAGE_SIZE * pg_tbl->num_coherent_pages; dev_dbg(page_table->device, "Page table size is 0\n");
return GASKET_STATUS_LAMED;
}
return min <= host_addr && host_addr < max; return GASKET_STATUS_ALIVE;
} }
/* Record the host_addr to coherent dma memory mapping. */
...