Commit 3d39cecc authored by David Woodhouse

intel-iommu: Remove superfluous iova_alloc_lock from IOVA code

We only ever obtain this lock immediately before the iova_rbtree_lock,
and release it immediately after the iova_rbtree_lock. So ditch it and
just use iova_rbtree_lock.

[v2: Remove the lockdep bits this time too]
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
parent 147202aa
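
Before the diff, a minimal standalone sketch of the pattern being removed (hypothetical lock names, not the kernel code itself): a lock that is only ever taken immediately before another lock and released immediately after it serializes nothing on its own, so the inner lock can carry the whole critical section and inherit the irqsave semantics.

#include <linux/spinlock.h>

/* Illustrative only -- hypothetical locks, not the iova code. */
static DEFINE_SPINLOCK(outer_lock);	/* the redundant lock */
static DEFINE_SPINLOCK(inner_lock);	/* the lock doing the real work */

static void before(void)
{
	unsigned long flags;

	/* outer_lock is only ever held strictly around inner_lock, so
	 * every critical section it guards is already serialized by
	 * inner_lock alone -- outer_lock adds no exclusion of its own. */
	spin_lock_irqsave(&outer_lock, flags);
	spin_lock(&inner_lock);
	/* ... operate on the shared structure ... */
	spin_unlock(&inner_lock);
	spin_unlock_irqrestore(&outer_lock, flags);
}

static void after(void)
{
	unsigned long flags;

	/* Same exclusion with one lock; the irqsave semantics move onto
	 * the remaining lock. */
	spin_lock_irqsave(&inner_lock, flags);
	/* ... operate on the shared structure ... */
	spin_unlock_irqrestore(&inner_lock, flags);
}
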
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1309,7 +1309,6 @@ static void iommu_detach_domain(struct dmar_domain *domain,
 }
 
 static struct iova_domain reserved_iova_list;
-static struct lock_class_key reserved_alloc_key;
 static struct lock_class_key reserved_rbtree_key;
 
 static void dmar_init_reserved_ranges(void)
@@ -1320,8 +1319,6 @@ static void dmar_init_reserved_ranges(void)
 
 	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
 
-	lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
-			  &reserved_alloc_key);
 	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
 			  &reserved_rbtree_key);
 
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -22,7 +22,6 @@
 void
 init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
 {
-	spin_lock_init(&iovad->iova_alloc_lock);
 	spin_lock_init(&iovad->iova_rbtree_lock);
 	iovad->rbroot = RB_ROOT;
 	iovad->cached32_node = NULL;
@@ -205,7 +204,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 	unsigned long limit_pfn,
 	bool size_aligned)
 {
-	unsigned long flags;
 	struct iova *new_iova;
 	int ret;
 
@@ -219,11 +217,9 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 	if (size_aligned)
 		size = __roundup_pow_of_two(size);
 
-	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
 	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
 			new_iova, size_aligned);
-	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
 
 	if (ret) {
 		free_iova_mem(new_iova);
 		return NULL;
@@ -381,8 +377,7 @@ reserve_iova(struct iova_domain *iovad,
 	struct iova *iova;
 	unsigned int overlap = 0;
 
-	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
-	spin_lock(&iovad->iova_rbtree_lock);
+	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
 	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
 		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
 			iova = container_of(node, struct iova, node);
@@ -402,8 +397,7 @@ reserve_iova(struct iova_domain *iovad,
 	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
 finish:
 
-	spin_unlock(&iovad->iova_rbtree_lock);
-	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 	return iova;
 }
@@ -420,8 +414,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
 	unsigned long flags;
 	struct rb_node *node;
 
-	spin_lock_irqsave(&from->iova_alloc_lock, flags);
-	spin_lock(&from->iova_rbtree_lock);
+	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
 	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
 		struct iova *iova = container_of(node, struct iova, node);
 		struct iova *new_iova;
@@ -430,6 +423,5 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
 			printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
 				iova->pfn_lo, iova->pfn_lo);
 	}
-	spin_unlock(&from->iova_rbtree_lock);
-	spin_unlock_irqrestore(&from->iova_alloc_lock, flags);
+	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
 }
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -28,7 +28,6 @@ struct iova {
 
 /* holds all the iova translations for a domain */
 struct iova_domain {
-	spinlock_t	iova_alloc_lock;/* Lock to protect iova allocation */
 	spinlock_t	iova_rbtree_lock; /* Lock to protect update of rbtree */
 	struct rb_root	rbroot;		/* iova domain rbtree root */
 	struct rb_node	*cached32_node;	/* Save last alloced node */
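
An aside on the lockdep_set_class() call the patch keeps in dmar_init_reserved_ranges(): giving the statically-allocated reserved domain its own lock_class_key stops lockdep from lumping its rbtree lock into the same class as every dynamically-initialized iova_rbtree_lock. A minimal sketch of that pattern, with hypothetical names rather than the kernel's:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* Hypothetical example mirroring the pattern retained above. */
static struct my_domain {
	spinlock_t lock;
} reserved_domain;

static struct lock_class_key reserved_key;

static void my_init_reserved(void)
{
	spin_lock_init(&reserved_domain.lock);
	/* Put this one instance in its own lockdep class so reports
	 * distinguish it from other locks initialized at the same site. */
	lockdep_set_class(&reserved_domain.lock, &reserved_key);
}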