author    | Yu Zhao <yu.zhao@intel.com>                 | 2009-01-04 16:28:52 +0800
committer | David Woodhouse <David.Woodhouse@intel.com> | 2009-02-09 11:03:17 +0000
commit    | 704126ad81b8cb7d3d70adb9ecb143f4d3fb38af (patch)
tree      | e73c4d595799661757b7505cd67833addef0635e /drivers/pci/intr_remapping.c
parent    | 43f7392ba9e2585bf34f21399b1ed78692b5d437 (diff)
VT-d: handle Invalidation Queue Error to avoid system hang
When hardware detects any error with a descriptor from the invalidation
queue, it stops fetching new descriptors from the queue until software
clears the Invalidation Queue Error bit in the Fault Status register.
The following fix handles the IQE so that the kernel won't be trapped in
an infinite loop.
Signed-off-by: Yu Zhao <yu.zhao@intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
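
The diff below only adds the plumbing: qi_flush_iec() now propagates the status
returned by qi_submit_sync(), and its callers pass that status up instead of
returning 0 unconditionally. The recovery itself is the register sequence the
message describes: read the Fault Status Register, and if the Invalidation
Queue Error bit is set, write it back to clear it so the hardware resumes
fetching descriptors. A minimal sketch of that sequence, assuming the register
layout given in the VT-d specification (FSTS at offset 0x34, IQE as bit 4,
write-1-to-clear); the accessor and helper names here are illustrative, not the
kernel's actual API:

/*
 * Illustrative sketch of the IQE recovery sequence, not kernel code.
 * Register offset/bit follow the VT-d specification; everything else
 * (names, MMIO accessors, error value) is assumed for the example.
 */
#include <stdint.h>

#define FSTS_REG	0x34		/* Fault Status Register offset    */
#define FSTS_IQE	(1u << 4)	/* Invalidation Queue Error (RW1C) */

/* Illustrative MMIO accessors over the remapping unit's register base. */
static inline uint32_t mmio_read32(volatile void *base, unsigned int off)
{
	return *(volatile uint32_t *)((volatile char *)base + off);
}

static inline void mmio_write32(volatile void *base, unsigned int off,
				uint32_t val)
{
	*(volatile uint32_t *)((volatile char *)base + off) = val;
}

/*
 * Check whether the hardware flagged an Invalidation Queue Error for the
 * last submitted descriptor.  Writing the bit back clears it, so the unit
 * resumes fetching descriptors, and the caller can report failure instead
 * of spinning forever on a wait descriptor that will never complete.
 */
static int qi_check_and_clear_iqe(volatile void *iommu_regs)
{
	uint32_t fsts = mmio_read32(iommu_regs, FSTS_REG);

	if (fsts & FSTS_IQE) {
		mmio_write32(iommu_regs, FSTS_REG, FSTS_IQE);	/* RW1C */
		return -1;	/* tell the caller the invalidation failed */
	}
	return 0;
}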
Diffstat (limited to 'drivers/pci/intr_remapping.c')
-rw-r--r-- | drivers/pci/intr_remapping.c | 21
1 file changed, 12 insertions, 9 deletions
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index f78371b22529..45effc5726c0 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -207,7 +207,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	return index;
 }
 
-static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
+static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
 {
 	struct qi_desc desc;
 
@@ -215,7 +215,7 @@ static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
 		   | QI_IEC_SELECTIVE;
 	desc.high = 0;
 
-	qi_submit_sync(&desc, iommu);
+	return qi_submit_sync(&desc, iommu);
 }
 
 int map_irq_to_irte_handle(int irq, u16 *sub_handle)
@@ -283,6 +283,7 @@ int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
 
 int modify_irte(int irq, struct irte *irte_modified)
 {
+	int rc;
 	int index;
 	struct irte *irte;
 	struct intel_iommu *iommu;
@@ -303,14 +304,15 @@ int modify_irte(int irq, struct irte *irte_modified)
 	set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
 	__iommu_flush_cache(iommu, irte, sizeof(*irte));
 
-	qi_flush_iec(iommu, index, 0);
-
+	rc = qi_flush_iec(iommu, index, 0);
 	spin_unlock(&irq_2_ir_lock);
-	return 0;
+
+	return rc;
 }
 
 int flush_irte(int irq)
 {
+	int rc;
 	int index;
 	struct intel_iommu *iommu;
 	struct irq_2_iommu *irq_iommu;
@@ -326,10 +328,10 @@ int flush_irte(int irq)
 
 	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 
-	qi_flush_iec(iommu, index, irq_iommu->irte_mask);
+	rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 	spin_unlock(&irq_2_ir_lock);
 
-	return 0;
+	return rc;
 }
 
 struct intel_iommu *map_ioapic_to_ir(int apic)
@@ -355,6 +357,7 @@ struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
 
 int free_irte(int irq)
 {
+	int rc = 0;
 	int index, i;
 	struct irte *irte;
 	struct intel_iommu *iommu;
@@ -375,7 +378,7 @@ int free_irte(int irq)
 	if (!irq_iommu->sub_handle) {
 		for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
 			set_64bit((unsigned long *)irte, 0);
-		qi_flush_iec(iommu, index, irq_iommu->irte_mask);
+		rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 	}
 
 	irq_iommu->iommu = NULL;
@@ -385,7 +388,7 @@ int free_irte(int irq)
 
 	spin_unlock(&irq_2_ir_lock);
 
-	return 0;
+	return rc;
 }
 
 static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
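
With these signatures, a call site can act on a failed IEC flush rather than
assuming success. A hypothetical caller, not part of this patch, using only the
functions changed above (update_remapped_irq and new_irte are made up for the
example):

/*
 * Hypothetical call site, not from this patch: propagate the new error
 * return so a failed IEC flush is visible to whoever programmed the IRTE.
 */
static int update_remapped_irq(int irq, struct irte *new_irte)
{
	int rc;

	rc = modify_irte(irq, new_irte);
	if (rc)
		printk(KERN_WARNING
		       "intr_remapping: IRTE update for irq %d failed (%d)\n",
		       irq, rc);

	return rc;
}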