Commit 8dedf4cf authored by Jiang Liu, committed by Thomas Gleixner

irq_remapping/vt-d: Change prototypes to prepare for hierarchical irqdomain

Prepare for the conversion to hierarchical irqdomains by changing
function prototypes. No functional changes.
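For readers skimming the diff below: the essence of the change is that alloc_irte() and modify_irte() now take a pointer to the per-interrupt struct irq_2_iommu bookkeeping data instead of a bare IRQ number, so callers resolve irq_2_iommu(irq) themselves. The following stand-alone sketch is not kernel code; the simplified struct, the state_table array and the lookup_state() helper are illustrative stand-ins only, meant to mirror the before/after calling convention:

    #include <stdio.h>

    /* Simplified stand-in for the kernel's per-IRQ bookkeeping structure. */
    struct irq_2_iommu {
            int irte_index;
            int sub_handle;
    };

    static struct irq_2_iommu state_table[16];

    /* Illustrative equivalent of the kernel's irq_2_iommu(irq) lookup. */
    static struct irq_2_iommu *lookup_state(int irq)
    {
            return &state_table[irq];
    }

    /* Old style: the helper takes an IRQ number and does the lookup itself. */
    static int modify_entry_old(int irq, int new_index)
    {
            struct irq_2_iommu *data = lookup_state(irq);

            data->irte_index = new_index;
            return 0;
    }

    /* New style: the caller passes the bookkeeping data in directly, which
     * removes the helper's dependency on a global irq -> state mapping. */
    static int modify_entry_new(struct irq_2_iommu *data, int new_index)
    {
            data->irte_index = new_index;
            return 0;
    }

    int main(void)
    {
            modify_entry_old(3, 7);               /* before the patch */
            modify_entry_new(lookup_state(3), 9); /* after the patch  */
            printf("irte_index = %d\n", state_table[3].irte_index);
            return 0;
    }

Decoupling the helpers from the IRQ number is what later allows the hierarchical irqdomain code to carry the irq_2_iommu data in its own per-domain structures.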
Signed-off-by: Jiang Liu <jiang.liu@linux.intel.com>
Acked-by: Joerg Roedel <jroedel@suse.de>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: David Cohen <david.a.cohen@linux.intel.com>
Cc: Sander Eikelenboom <linux@eikelenboom.it>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: iommu@lists.linux-foundation.org
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dimitri Sivanich <sivanich@sgi.com>
Cc: Joerg Roedel <joro@8bytes.org>
Link: http://lkml.kernel.org/r/1428905519-23704-10-git-send-email-jiang.liu@linux.intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 947045a2
@@ -82,10 +82,10 @@ static int get_irte(int irq, struct irte *entry)
 	return 0;
 }
 
-static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
+static int alloc_irte(struct intel_iommu *iommu, int irq,
+		      struct irq_2_iommu *irq_iommu, u16 count)
 {
 	struct ir_table *table = iommu->ir_table;
-	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	struct irq_cfg *cfg = irq_cfg(irq);
 	unsigned int mask = 0;
 	unsigned long flags;
@@ -173,9 +173,9 @@ static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subha
 	return 0;
 }
 
-static int modify_irte(int irq, struct irte *irte_modified)
+static int modify_irte(struct irq_2_iommu *irq_iommu,
+		       struct irte *irte_modified)
 {
-	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	struct intel_iommu *iommu;
 	unsigned long flags;
 	struct irte *irte;
@@ -242,7 +242,7 @@ static int clear_entries(struct irq_2_iommu *irq_iommu)
 		return 0;
 
 	iommu = irq_iommu->iommu;
-	index = irq_iommu->irte_index + irq_iommu->sub_handle;
+	index = irq_iommu->irte_index;
 
 	start = iommu->ir_table->base + index;
 	end = start + (1 << irq_iommu->irte_mask);
@@ -986,7 +986,7 @@ static int intel_setup_ioapic_entry(int irq,
 		pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
 		index = -ENODEV;
 	} else {
-		index = alloc_irte(iommu, irq, 1);
+		index = alloc_irte(iommu, irq, irq_2_iommu(irq), 1);
 		if (index < 0) {
 			pr_warn("Failed to allocate IRTE for ioapic %d\n",
 				ioapic_id);
@@ -1002,7 +1002,7 @@ static int intel_setup_ioapic_entry(int irq,
 	/* Set source-id of interrupt request */
 	set_ioapic_sid(&irte, ioapic_id);
 
-	modify_irte(irq, &irte);
+	modify_irte(irq_2_iommu(irq), &irte);
 
 	apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: "
 		"Set IRTE entry (P:%d FPD:%d Dst_Mode:%d "
@@ -1089,7 +1089,7 @@ intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	 * Atomically updates the IRTE with the new destination, vector
 	 * and flushes the interrupt entry cache.
 	 */
-	modify_irte(irq, &irte);
+	modify_irte(irq_2_iommu(irq), &irte);
 
 	/*
 	 * After this point, all the interrupts will start arriving
@@ -1125,7 +1125,7 @@ static void intel_compose_msi_msg(struct pci_dev *pdev,
 	else
 		set_hpet_sid(&irte, hpet_id);
 
-	modify_irte(irq, &irte);
+	modify_irte(irq_2_iommu(irq), &irte);
 
 	msg->address_hi = MSI_ADDR_BASE_HI;
 	msg->data = sub_handle;
@@ -1152,7 +1152,7 @@ static int intel_msi_alloc_irq(struct pci_dev *dev, int irq, int nvec)
 		       "Unable to map PCI %s to iommu\n", pci_name(dev));
 		index = -ENOENT;
 	} else {
-		index = alloc_irte(iommu, irq, nvec);
+		index = alloc_irte(iommu, irq, irq_2_iommu(irq), nvec);
 		if (index < 0) {
 			printk(KERN_ERR
 			       "Unable to allocate %d IRTE for PCI %s\n",
@@ -1196,7 +1196,7 @@ static int intel_alloc_hpet_msi(unsigned int irq, unsigned int id)
 	down_read(&dmar_global_lock);
 	iommu = map_hpet_to_ir(id);
 	if (iommu) {
-		index = alloc_irte(iommu, irq, 1);
+		index = alloc_irte(iommu, irq, irq_2_iommu(irq), 1);
 		if (index >= 0)
 			ret = 0;
 	}