Commit 1757659d authored by Rafael J. Wysocki's avatar Rafael J. Wysocki

ACPI: OSL: Implement deferred unmapping of ACPI memory

The ACPI OS layer in Linux uses RCU to protect the walkers of the
list of ACPI memory mappings from seeing an inconsistent state
while it is being updated.  Among other situations, that list can
be walked in (NMI and non-NMI) interrupt context, so using a
sleeping lock to protect it is not an option.

However, performance issues related to the RCU usage in there
appear, as described by Dan Williams:

"Recently a performance problem was reported for a process invoking
a non-trivial ASL program. The method call in this case ends up
repetitively triggering a call path like:

    acpi_ex_store
    acpi_ex_store_object_to_node
    acpi_ex_write_data_to_field
    acpi_ex_insert_into_field
    acpi_ex_write_with_update_rule
    acpi_ex_field_datum_io
    acpi_ex_access_region
    acpi_ev_address_space_dispatch
    acpi_ex_system_memory_space_handler
    acpi_os_map_cleanup.part.14
    _synchronize_rcu_expedited.constprop.89
    schedule

The end result of frequent synchronize_rcu_expedited() invocation is
tiny sub-millisecond spurts of execution where the scheduler freely
migrates this apparently sleepy task. The overhead of frequent
scheduler invocation multiplies the execution time by a factor
of 2-3X."

The source of this is that acpi_ex_system_memory_space_handler()
unmaps the memory mapping currently cached by it at the access time
if that mapping doesn't cover the memory area being accessed.
Consequently, if there is a memory opregion with two fields
separated from each other by an unused chunk of address space that
is large enough for not being covered by a single mapping, and they
happen to be used in an alternating pattern, the unmapping will
occur on every acpi_ex_system_memory_space_handler() invocation for
that memory opregion and that will lead to significant overhead.

Moreover, acpi_ex_system_memory_space_handler() carries out the
memory unmapping with the namespace and interpreter mutexes held
which may lead to additional latency, because all of the tasks
wanting to acquire one of these mutexes need to wait for the
memory unmapping operation to complete.

To address that, rework acpi_os_unmap_memory() so that it does not
release the memory mapping covering the given address range right
away and instead make it queue up the mapping at hand for removal
via queue_rcu_work().
Reported-by: Dan Williams <dan.j.williams@intel.com>
Tested-by: Xiang Li <xiang.z.li@intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
parent 9ebcfadb
...@@ -77,7 +77,10 @@ struct acpi_ioremap { ...@@ -77,7 +77,10 @@ struct acpi_ioremap {
void __iomem *virt; void __iomem *virt;
acpi_physical_address phys; acpi_physical_address phys;
acpi_size size; acpi_size size;
union {
unsigned long refcount; unsigned long refcount;
struct rcu_work rwork;
} track;
}; };
static LIST_HEAD(acpi_ioremaps); static LIST_HEAD(acpi_ioremaps);
...@@ -250,7 +253,7 @@ void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size) ...@@ -250,7 +253,7 @@ void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
map = acpi_map_lookup(phys, size); map = acpi_map_lookup(phys, size);
if (map) { if (map) {
virt = map->virt + (phys - map->phys); virt = map->virt + (phys - map->phys);
map->refcount++; map->track.refcount++;
} }
mutex_unlock(&acpi_ioremap_lock); mutex_unlock(&acpi_ioremap_lock);
return virt; return virt;
...@@ -335,7 +338,7 @@ void __iomem __ref ...@@ -335,7 +338,7 @@ void __iomem __ref
/* Check if there's a suitable mapping already. */ /* Check if there's a suitable mapping already. */
map = acpi_map_lookup(phys, size); map = acpi_map_lookup(phys, size);
if (map) { if (map) {
map->refcount++; map->track.refcount++;
goto out; goto out;
} }
...@@ -358,7 +361,7 @@ void __iomem __ref ...@@ -358,7 +361,7 @@ void __iomem __ref
map->virt = virt; map->virt = virt;
map->phys = pg_off; map->phys = pg_off;
map->size = pg_sz; map->size = pg_sz;
map->refcount = 1; map->track.refcount = 1;
list_add_tail_rcu(&map->list, &acpi_ioremaps); list_add_tail_rcu(&map->list, &acpi_ioremaps);
...@@ -374,41 +377,46 @@ void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size) ...@@ -374,41 +377,46 @@ void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
} }
EXPORT_SYMBOL_GPL(acpi_os_map_memory); EXPORT_SYMBOL_GPL(acpi_os_map_memory);
/*
 * Release a mapping: undo the ioremap and free its tracking structure.
 * NOTE(review): callers must ensure no RCU list walkers can still reach
 * @map — both call sites run after list_del_rcu() plus a grace period
 * (synchronize_rcu_expedited() or queue_rcu_work()); confirm for any new
 * caller.
 */
static void acpi_os_map_remove(struct acpi_ioremap *map)
{
acpi_unmap(map->phys, map->virt);
kfree(map);
}
/*
 * RCU work callback for deferred unmapping.  Queued via queue_rcu_work()
 * in acpi_os_drop_map_ref(), so it runs only after an RCU grace period
 * has elapsed since the mapping was unlinked from the acpi_ioremaps list.
 * Recover the enclosing struct acpi_ioremap from the embedded rcu_work
 * (track.rwork shares a union with the refcount, which is 0 by now).
 */
static void acpi_os_map_cleanup_deferred(struct work_struct *work)
{
acpi_os_map_remove(container_of(to_rcu_work(work), struct acpi_ioremap,
track.rwork));
}
/* Must be called with mutex_lock(&acpi_ioremap_lock) */ /* Must be called with mutex_lock(&acpi_ioremap_lock) */
static unsigned long acpi_os_drop_map_ref(struct acpi_ioremap *map) static bool acpi_os_drop_map_ref(struct acpi_ioremap *map, bool defer)
{ {
unsigned long refcount = --map->refcount; if (--map->track.refcount)
return true;
if (!refcount)
list_del_rcu(&map->list); list_del_rcu(&map->list);
return refcount;
if (defer) {
INIT_RCU_WORK(&map->track.rwork, acpi_os_map_cleanup_deferred);
queue_rcu_work(system_wq, &map->track.rwork);
}
return defer;
} }
static void acpi_os_map_cleanup(struct acpi_ioremap *map) static void acpi_os_map_cleanup(struct acpi_ioremap *map)
{ {
if (!map)
return;
synchronize_rcu_expedited(); synchronize_rcu_expedited();
acpi_unmap(map->phys, map->virt); acpi_os_map_remove(map);
kfree(map);
} }
/** static void __ref __acpi_os_unmap_iomem(void __iomem *virt, acpi_size size,
* acpi_os_unmap_iomem - Drop a memory mapping reference. bool defer)
* @virt: Start of the address range to drop a reference to.
* @size: Size of the address range to drop a reference to.
*
* Look up the given virtual address range in the list of existing ACPI memory
* mappings, drop a reference to it and unmap it if there are no more active
* references to it.
*
* During early init (when acpi_permanent_mmap has not been set yet) this
* routine simply calls __acpi_unmap_table() to get the job done. Since
* __acpi_unmap_table() is an __init function, the __ref annotation is needed
* here.
*/
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{ {
struct acpi_ioremap *map; struct acpi_ioremap *map;
unsigned long refcount;
if (!acpi_permanent_mmap) { if (!acpi_permanent_mmap) {
__acpi_unmap_table(virt, size); __acpi_unmap_table(virt, size);
...@@ -416,23 +424,56 @@ void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size) ...@@ -416,23 +424,56 @@ void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
} }
mutex_lock(&acpi_ioremap_lock); mutex_lock(&acpi_ioremap_lock);
map = acpi_map_lookup_virt(virt, size); map = acpi_map_lookup_virt(virt, size);
if (!map) { if (!map) {
mutex_unlock(&acpi_ioremap_lock); mutex_unlock(&acpi_ioremap_lock);
WARN(true, PREFIX "%s: bad address %p\n", __func__, virt); WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
return; return;
} }
refcount = acpi_os_drop_map_ref(map); if (acpi_os_drop_map_ref(map, defer))
map = NULL;
mutex_unlock(&acpi_ioremap_lock); mutex_unlock(&acpi_ioremap_lock);
if (!refcount)
acpi_os_map_cleanup(map); acpi_os_map_cleanup(map);
} }
/**
 * acpi_os_unmap_iomem - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 *
 * Look up the given virtual address range in the list of existing ACPI memory
 * mappings, drop a reference to it and unmap it if there are no more active
 * references to it.
 *
 * This is the synchronous variant: if the last reference is dropped, the
 * unmapping (including the RCU grace-period wait) completes before return.
 * Use acpi_os_unmap_memory() for the deferred variant.
 *
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_unmap_table() to get the job done. Since
 * __acpi_unmap_table() is an __init function, the __ref annotation is needed
 * here.
 */
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
/* defer == false: release the mapping before returning. */
__acpi_os_unmap_iomem(virt, size, false);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem); EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);
/**
* acpi_os_unmap_memory - Drop a memory mapping reference.
* @virt: Start of the address range to drop a reference to.
* @size: Size of the address range to drop a reference to.
*
* Look up the given virtual address range in the list of existing ACPI memory
* mappings, drop a reference to it and if there are no more active references
* to it, queue it up for later removal.
*
* During early init (when acpi_permanent_mmap has not been set yet) this
* routine behaves like acpi_os_unmap_iomem().
*/
void __ref acpi_os_unmap_memory(void *virt, acpi_size size) void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
{ {
return acpi_os_unmap_iomem((void __iomem *)virt, size); __acpi_os_unmap_iomem((void __iomem *)virt, size, true);
} }
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory); EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
...@@ -461,7 +502,6 @@ void acpi_os_unmap_generic_address(struct acpi_generic_address *gas) ...@@ -461,7 +502,6 @@ void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{ {
u64 addr; u64 addr;
struct acpi_ioremap *map; struct acpi_ioremap *map;
unsigned long refcount;
if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
return; return;
...@@ -472,15 +512,17 @@ void acpi_os_unmap_generic_address(struct acpi_generic_address *gas) ...@@ -472,15 +512,17 @@ void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
return; return;
mutex_lock(&acpi_ioremap_lock); mutex_lock(&acpi_ioremap_lock);
map = acpi_map_lookup(addr, gas->bit_width / 8); map = acpi_map_lookup(addr, gas->bit_width / 8);
if (!map) { if (!map) {
mutex_unlock(&acpi_ioremap_lock); mutex_unlock(&acpi_ioremap_lock);
return; return;
} }
refcount = acpi_os_drop_map_ref(map); if (acpi_os_drop_map_ref(map, false))
map = NULL;
mutex_unlock(&acpi_ioremap_lock); mutex_unlock(&acpi_ioremap_lock);
if (!refcount)
acpi_os_map_cleanup(map); acpi_os_map_cleanup(map);
} }
EXPORT_SYMBOL(acpi_os_unmap_generic_address); EXPORT_SYMBOL(acpi_os_unmap_generic_address);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment