Commit f71f6421 authored by Linus Torvalds

Merge tag 'dma-mapping-6.5-2023-07-09' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:

 - swiotlb area sizing fixes (Petr Tesarik)

* tag 'dma-mapping-6.5-2023-07-09' of git://git.infradead.org/users/hch/dma-mapping:
  swiotlb: reduce the number of areas to match actual memory pool size
  swiotlb: always set the number of areas before allocating the pool
parents a9943ad3 8ac04063
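
For readers skimming the diff: swiotlb carves its bounce-buffer pool into independently locked areas, and every area needs at least one full segment of IO_TLB_SEGSIZE (128) slots to be usable. When the pool ends up smaller than nareas * IO_TLB_SEGSIZE, the surplus areas hold no slots at all, which is what the new limit_nareas() guards against. Below is a minimal userspace sketch of that invariant, for illustration only; the constant matches the kernel's, but the main() driver and the example pool sizes are made up.

#include <stdio.h>

#define IO_TLB_SEGSIZE	128	/* slots per segment, as in the kernel */

/* Cap the area count so that every area owns at least one full segment. */
static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots)
{
	if (nslots < nareas * IO_TLB_SEGSIZE)
		return nslots / IO_TLB_SEGSIZE;
	return nareas;
}

int main(void)
{
	/* 64 MiB pool with 2 KiB slots = 32768 slots: 64 areas fit. */
	printf("%u\n", limit_nareas(64, 32768));	/* prints 64 */
	/* A pool shrunk to 512 slots can only support 4 areas. */
	printf("%u\n", limit_nareas(64, 512));		/* prints 4 */
	return 0;
}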
@@ -115,9 +115,16 @@ static bool round_up_default_nslabs(void)
 	return true;
 }
 
+/**
+ * swiotlb_adjust_nareas() - adjust the number of areas and slots
+ * @nareas: Desired number of areas. Zero is treated as 1.
+ *
+ * Adjust the default number of areas in a memory pool.
+ * The default size of the memory pool may also change to meet minimum area
+ * size requirements.
+ */
 static void swiotlb_adjust_nareas(unsigned int nareas)
 {
-	/* use a single area when non is specified */
 	if (!nareas)
 		nareas = 1;
 	else if (!is_power_of_2(nareas))
@@ -131,6 +138,23 @@ static void swiotlb_adjust_nareas(unsigned int nareas)
 			(default_nslabs << IO_TLB_SHIFT) >> 20);
 }
 
+/**
+ * limit_nareas() - get the maximum number of areas for a given memory pool size
+ * @nareas: Desired number of areas.
+ * @nslots: Total number of slots in the memory pool.
+ *
+ * Limit the number of areas to the maximum possible number of areas in
+ * a memory pool of the given size.
+ *
+ * Return: Maximum possible number of areas.
+ */
+static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots)
+{
+	if (nslots < nareas * IO_TLB_SEGSIZE)
+		return nslots / IO_TLB_SEGSIZE;
+	return nareas;
+}
+
 static int __init
 setup_io_tlb_npages(char *str)
 {
@@ -290,6 +314,7 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
 {
 	struct io_tlb_mem *mem = &io_tlb_default_mem;
 	unsigned long nslabs;
+	unsigned int nareas;
 	size_t alloc_size;
 	void *tlb;
@@ -298,18 +323,16 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
 	if (swiotlb_force_disable)
 		return;
 
-	/*
-	 * default_nslabs maybe changed when adjust area number.
-	 * So allocate bounce buffer after adjusting area number.
-	 */
 	if (!default_nareas)
 		swiotlb_adjust_nareas(num_possible_cpus());
 
 	nslabs = default_nslabs;
+	nareas = limit_nareas(default_nareas, nslabs);
 	while ((tlb = swiotlb_memblock_alloc(nslabs, flags, remap)) == NULL) {
 		if (nslabs <= IO_TLB_MIN_SLABS)
 			return;
 		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
+		nareas = limit_nareas(nareas, nslabs);
 	}
 
 	if (default_nslabs != nslabs) {
@@ -355,6 +378,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 {
 	struct io_tlb_mem *mem = &io_tlb_default_mem;
 	unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
+	unsigned int nareas;
 	unsigned char *vstart = NULL;
 	unsigned int order, area_order;
 	bool retried = false;
@@ -363,6 +387,9 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 	if (swiotlb_force_disable)
 		return 0;
 
+	if (!default_nareas)
+		swiotlb_adjust_nareas(num_possible_cpus());
+
 retry:
 	order = get_order(nslabs << IO_TLB_SHIFT);
 	nslabs = SLABS_PER_PAGE << order;
@@ -397,11 +424,8 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 			(PAGE_SIZE << order) >> 20);
 	}
 
-	if (!default_nareas)
-		swiotlb_adjust_nareas(num_possible_cpus());
-
-	area_order = get_order(array_size(sizeof(*mem->areas),
-			default_nareas));
+	nareas = limit_nareas(default_nareas, nslabs);
+	area_order = get_order(array_size(sizeof(*mem->areas), nareas));
 	mem->areas = (struct io_tlb_area *)
 		__get_free_pages(GFP_KERNEL | __GFP_ZERO, area_order);
 	if (!mem->areas)
@@ -415,7 +439,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 	set_memory_decrypted((unsigned long)vstart,
 			(nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
 	swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true,
-			default_nareas);
+			nareas);
 	swiotlb_print_info();
 
 	return 0;
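
One subtlety worth spelling out: swiotlb_init_remap() can halve nslabs several times when the memblock allocation fails, and swiotlb_init_late() now calls swiotlb_adjust_nareas() before its retry: loop, so the area count must be re-capped against whatever pool size the loop finally settles on. Below is a small userspace simulation of that shrink-and-recap behaviour, assuming a hypothetical alloc_ok() failure threshold in place of swiotlb_memblock_alloc() and a made-up starting CPU count of 256; the constants mirror the kernel's values.

#include <stdio.h>

#define IO_TLB_SEGSIZE		128	/* slots per segment, as in the kernel */
#define IO_TLB_MIN_SLABS	512	/* mirrors the kernel's 1 MiB floor */

/* Cap the area count so that every area owns at least one full segment. */
static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots)
{
	if (nslots < nareas * IO_TLB_SEGSIZE)
		return nslots / IO_TLB_SEGSIZE;
	return nareas;
}

/* Round up to a multiple of a power-of-two 'a', like the kernel's ALIGN(). */
static unsigned long align_up(unsigned long x, unsigned long a)
{
	return (x + a - 1) & ~(a - 1);
}

/* Stand-in for swiotlb_memblock_alloc(): pretend anything larger than
 * 8192 slabs (16 MiB with 2 KiB slots) cannot be allocated. Illustrative. */
static int alloc_ok(unsigned long nslabs)
{
	return nslabs <= 8192;
}

int main(void)
{
	unsigned long nslabs = 32768;	/* hypothetical 64 MiB request */
	unsigned int nareas = limit_nareas(256, nslabs);	/* 256 "CPUs" */

	while (!alloc_ok(nslabs)) {
		if (nslabs <= IO_TLB_MIN_SLABS)
			return 1;	/* out of options, as the kernel bails */
		nslabs = align_up(nslabs >> 1, IO_TLB_SEGSIZE);
		nareas = limit_nareas(nareas, nslabs);
		printf("retry: nslabs=%lu nareas=%u\n", nslabs, nareas);
	}
	printf("final: nslabs=%lu nareas=%u\n", nslabs, nareas);
	return 0;
}

Without the re-cap inside the loop, the pool in this example would end up with 8192 slots but still claim 256 areas, 192 of which could never hold a mapping; with it, the final pool reports 64 areas of one segment each.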