Commit d35ac6ac authored by Linus Torvalds

Merge tag 'iommu-updates-v6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu updates from Joerg Roedel:
 "Core changes:
   - iova_magazine_alloc() optimization
   - Make flush-queue an IOMMU driver capability
   - Consolidate the error handling around device attachment

  AMD IOMMU changes:
   - AVIC Interrupt Remapping Improvements
   - Some minor fixes and cleanups

  Intel VT-d changes from Lu Baolu:
   - Small and misc cleanups

  ARM-SMMU changes from Will Deacon:
   - Device-tree binding updates:
      - Add missing clocks for SC8280XP and SA8775 Adreno SMMUs
      - Add two new Qualcomm SMMUs in SDX75 and SM6375
   - Workarounds for Arm MMU-600 and MMU-700 errata:
      - 1076982 (MMU-600): Avoid use of SEV-based cmdq wakeup
      - 2812531 (MMU-700): Terminate command batches with a CMD_SYNC
      - Enforce single-stage translation to avoid nesting-related errata
   - Set the correct level hint for range TLB invalidation on teardown

  .. and some other minor fixes and cleanups (including Freescale PAMU
  and virtio-iommu changes)"

* tag 'iommu-updates-v6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (50 commits)
  iommu/vt-d: Remove commented-out code
  iommu/vt-d: Remove two WARN_ON in domain_context_mapping_one()
  iommu/vt-d: Handle the failure case of dmar_reenable_qi()
  iommu/vt-d: Remove unnecessary (void*) conversions
  iommu/amd: Remove extern from function prototypes
  iommu/amd: Use BIT/BIT_ULL macro to define bit fields
  iommu/amd: Fix DTE_IRQ_PHYS_ADDR_MASK macro
  iommu/amd: Fix compile error for unused function
  iommu/amd: Improving Interrupt Remapping Table Invalidation
  iommu/amd: Do not Invalidate IRT when IRTE caching is disabled
  iommu/amd: Introduce Disable IRTE Caching Support
  iommu/amd: Remove the unused struct amd_ir_data.ref
  iommu/amd: Switch amd_iommu_update_ga() to use modify_irte_ga()
  iommu/arm-smmu-v3: Set TTL invalidation hint better
  iommu/arm-smmu-v3: Document nesting-related errata
  iommu/arm-smmu-v3: Add explicit feature for nesting
  iommu/arm-smmu-v3: Document MMU-700 erratum 2812531
  iommu/arm-smmu-v3: Work around MMU-600 erratum 1076982
  dt-bindings: arm-smmu: Add SDX75 SMMU compatible
  dt-bindings: arm-smmu: Add SM6375 GPU SMMU
  ...
parents 0b26eadb a7a33407
...@@ -254,6 +254,7 @@ ForEachMacros: ...@@ -254,6 +254,7 @@ ForEachMacros:
- 'for_each_free_mem_range' - 'for_each_free_mem_range'
- 'for_each_free_mem_range_reverse' - 'for_each_free_mem_range_reverse'
- 'for_each_func_rsrc' - 'for_each_func_rsrc'
- 'for_each_group_device'
- 'for_each_group_evsel' - 'for_each_group_evsel'
- 'for_each_group_member' - 'for_each_group_member'
- 'for_each_hstate' - 'for_each_hstate'
......
...@@ -323,6 +323,7 @@ ...@@ -323,6 +323,7 @@
option with care. option with care.
pgtbl_v1 - Use v1 page table for DMA-API (Default). pgtbl_v1 - Use v1 page table for DMA-API (Default).
pgtbl_v2 - Use v2 page table for DMA-API. pgtbl_v2 - Use v2 page table for DMA-API.
irtcachedis - Disable Interrupt Remapping Table (IRT) caching.
amd_iommu_dump= [HW,X86-64] amd_iommu_dump= [HW,X86-64]
Enable AMD IOMMU driver option to dump the ACPI table Enable AMD IOMMU driver option to dump the ACPI table
......
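As a usage sketch for the new option (assuming no other amd_iommu= sub-options are needed on the command line), IRTE caching would be disabled at boot with:

    amd_iommu=irtcachedis

The string is recognised in parse_amd_iommu_options() further down in this series and simply sets amd_iommu_irtcachedis, which iommu_enable_irtcachedis() then acts on during early enable.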
...@@ -140,6 +140,10 @@ stable kernels. ...@@ -140,6 +140,10 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+
| ARM | MMU-500 | #841119,826419 | N/A | | ARM | MMU-500 | #841119,826419 | N/A |
+----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+
| ARM | MMU-600 | #1076982,1209401| N/A |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | MMU-700 | #2268618,2812531| N/A |
+----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+
| Broadcom | Brahma-B53 | N/A | ARM64_ERRATUM_845719 | | Broadcom | Brahma-B53 | N/A | ARM64_ERRATUM_845719 |
+----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+
......
...@@ -29,6 +29,7 @@ properties: ...@@ -29,6 +29,7 @@ properties:
- qcom,msm8996-smmu-v2 - qcom,msm8996-smmu-v2
- qcom,msm8998-smmu-v2 - qcom,msm8998-smmu-v2
- qcom,sdm630-smmu-v2 - qcom,sdm630-smmu-v2
- qcom,sm6375-smmu-v2
- const: qcom,smmu-v2 - const: qcom,smmu-v2
- description: Qcom SoCs implementing "qcom,smmu-500" and "arm,mmu-500" - description: Qcom SoCs implementing "qcom,smmu-500" and "arm,mmu-500"
...@@ -45,6 +46,7 @@ properties: ...@@ -45,6 +46,7 @@ properties:
- qcom,sdm845-smmu-500 - qcom,sdm845-smmu-500
- qcom,sdx55-smmu-500 - qcom,sdx55-smmu-500
- qcom,sdx65-smmu-500 - qcom,sdx65-smmu-500
- qcom,sdx75-smmu-500
- qcom,sm6115-smmu-500 - qcom,sm6115-smmu-500
- qcom,sm6125-smmu-500 - qcom,sm6125-smmu-500
- qcom,sm6350-smmu-500 - qcom,sm6350-smmu-500
...@@ -79,7 +81,9 @@ properties: ...@@ -79,7 +81,9 @@ properties:
- description: Qcom Adreno GPUs implementing "qcom,smmu-500" and "arm,mmu-500" - description: Qcom Adreno GPUs implementing "qcom,smmu-500" and "arm,mmu-500"
items: items:
- enum: - enum:
- qcom,sa8775p-smmu-500
- qcom,sc7280-smmu-500 - qcom,sc7280-smmu-500
- qcom,sc8280xp-smmu-500
- qcom,sm6115-smmu-500 - qcom,sm6115-smmu-500
- qcom,sm6125-smmu-500 - qcom,sm6125-smmu-500
- qcom,sm8150-smmu-500 - qcom,sm8150-smmu-500
...@@ -267,6 +271,7 @@ allOf: ...@@ -267,6 +271,7 @@ allOf:
enum: enum:
- qcom,msm8998-smmu-v2 - qcom,msm8998-smmu-v2
- qcom,sdm630-smmu-v2 - qcom,sdm630-smmu-v2
- qcom,sm6375-smmu-v2
then: then:
anyOf: anyOf:
- properties: - properties:
...@@ -331,7 +336,10 @@ allOf: ...@@ -331,7 +336,10 @@ allOf:
properties: properties:
compatible: compatible:
contains: contains:
const: qcom,sc7280-smmu-500 enum:
- qcom,sa8775p-smmu-500
- qcom,sc7280-smmu-500
- qcom,sc8280xp-smmu-500
then: then:
properties: properties:
clock-names: clock-names:
...@@ -413,10 +421,8 @@ allOf: ...@@ -413,10 +421,8 @@ allOf:
- nvidia,smmu-500 - nvidia,smmu-500
- qcom,qcm2290-smmu-500 - qcom,qcm2290-smmu-500
- qcom,qdu1000-smmu-500 - qcom,qdu1000-smmu-500
- qcom,sa8775p-smmu-500
- qcom,sc7180-smmu-500 - qcom,sc7180-smmu-500
- qcom,sc8180x-smmu-500 - qcom,sc8180x-smmu-500
- qcom,sc8280xp-smmu-500
- qcom,sdm670-smmu-500 - qcom,sdm670-smmu-500
- qcom,sdm845-smmu-500 - qcom,sdm845-smmu-500
- qcom,sdx55-smmu-500 - qcom,sdx55-smmu-500
......
...@@ -1353,6 +1353,7 @@ static struct platform_driver fsl_pci_driver = { ...@@ -1353,6 +1353,7 @@ static struct platform_driver fsl_pci_driver = {
.of_match_table = pci_ids, .of_match_table = pci_ids,
}, },
.probe = fsl_pci_probe, .probe = fsl_pci_probe,
.driver_managed_dma = true,
}; };
static int __init fsl_pci_init(void) static int __init fsl_pci_init(void)
......
...@@ -11,12 +11,15 @@ ...@@ -11,12 +11,15 @@
#include "amd_iommu_types.h" #include "amd_iommu_types.h"
extern irqreturn_t amd_iommu_int_thread(int irq, void *data); irqreturn_t amd_iommu_int_thread(int irq, void *data);
extern irqreturn_t amd_iommu_int_handler(int irq, void *data); irqreturn_t amd_iommu_int_handler(int irq, void *data);
extern void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid); void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid);
extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu); void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
extern void amd_iommu_restart_ga_log(struct amd_iommu *iommu); void amd_iommu_restart_ga_log(struct amd_iommu *iommu);
extern void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid); int amd_iommu_init_devices(void);
void amd_iommu_uninit_devices(void);
void amd_iommu_init_notifier(void);
void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid);
#ifdef CONFIG_AMD_IOMMU_DEBUGFS #ifdef CONFIG_AMD_IOMMU_DEBUGFS
void amd_iommu_debugfs_setup(struct amd_iommu *iommu); void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
...@@ -25,11 +28,11 @@ static inline void amd_iommu_debugfs_setup(struct amd_iommu *iommu) {} ...@@ -25,11 +28,11 @@ static inline void amd_iommu_debugfs_setup(struct amd_iommu *iommu) {}
#endif #endif
/* Needed for interrupt remapping */ /* Needed for interrupt remapping */
extern int amd_iommu_prepare(void); int amd_iommu_prepare(void);
extern int amd_iommu_enable(void); int amd_iommu_enable(void);
extern void amd_iommu_disable(void); void amd_iommu_disable(void);
extern int amd_iommu_reenable(int); int amd_iommu_reenable(int mode);
extern int amd_iommu_enable_faulting(void); int amd_iommu_enable_faulting(void);
extern int amd_iommu_guest_ir; extern int amd_iommu_guest_ir;
extern enum io_pgtable_fmt amd_iommu_pgtable; extern enum io_pgtable_fmt amd_iommu_pgtable;
extern int amd_iommu_gpt_level; extern int amd_iommu_gpt_level;
...@@ -37,33 +40,32 @@ extern int amd_iommu_gpt_level; ...@@ -37,33 +40,32 @@ extern int amd_iommu_gpt_level;
/* IOMMUv2 specific functions */ /* IOMMUv2 specific functions */
struct iommu_domain; struct iommu_domain;
extern bool amd_iommu_v2_supported(void); bool amd_iommu_v2_supported(void);
extern struct amd_iommu *get_amd_iommu(unsigned int idx); struct amd_iommu *get_amd_iommu(unsigned int idx);
extern u8 amd_iommu_pc_get_max_banks(unsigned int idx); u8 amd_iommu_pc_get_max_banks(unsigned int idx);
extern bool amd_iommu_pc_supported(void); bool amd_iommu_pc_supported(void);
extern u8 amd_iommu_pc_get_max_counters(unsigned int idx); u8 amd_iommu_pc_get_max_counters(unsigned int idx);
extern int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
u8 fxn, u64 *value); u8 fxn, u64 *value);
extern int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
u8 fxn, u64 *value); u8 fxn, u64 *value);
extern int amd_iommu_register_ppr_notifier(struct notifier_block *nb); int amd_iommu_register_ppr_notifier(struct notifier_block *nb);
extern int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb); int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb);
extern void amd_iommu_domain_direct_map(struct iommu_domain *dom); void amd_iommu_domain_direct_map(struct iommu_domain *dom);
extern int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids); int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids);
extern int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid, int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid, u64 address);
u64 address); void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
extern void amd_iommu_update_and_flush_device_table(struct protection_domain *domain); void amd_iommu_domain_update(struct protection_domain *domain);
extern void amd_iommu_domain_update(struct protection_domain *domain); void amd_iommu_domain_flush_complete(struct protection_domain *domain);
extern void amd_iommu_domain_flush_complete(struct protection_domain *domain); void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain);
extern void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain); int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid);
extern int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid); int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid, unsigned long cr3);
unsigned long cr3); int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid);
extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid);
#ifdef CONFIG_IRQ_REMAP #ifdef CONFIG_IRQ_REMAP
extern int amd_iommu_create_irq_domain(struct amd_iommu *iommu); int amd_iommu_create_irq_domain(struct amd_iommu *iommu);
#else #else
static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu) static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
{ {
...@@ -75,8 +77,8 @@ static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu) ...@@ -75,8 +77,8 @@ static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
#define PPR_INVALID 0x1 #define PPR_INVALID 0x1
#define PPR_FAILURE 0xf #define PPR_FAILURE 0xf
extern int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid, int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
int status, int tag); int status, int tag);
static inline bool is_rd890_iommu(struct pci_dev *pdev) static inline bool is_rd890_iommu(struct pci_dev *pdev)
{ {
...@@ -129,10 +131,9 @@ static inline void *alloc_pgtable_page(int nid, gfp_t gfp) ...@@ -129,10 +131,9 @@ static inline void *alloc_pgtable_page(int nid, gfp_t gfp)
return page ? page_address(page) : NULL; return page ? page_address(page) : NULL;
} }
extern bool translation_pre_enabled(struct amd_iommu *iommu); bool translation_pre_enabled(struct amd_iommu *iommu);
extern bool amd_iommu_is_attach_deferred(struct device *dev); bool amd_iommu_is_attach_deferred(struct device *dev);
extern int __init add_special_device(u8 type, u8 id, u32 *devid, int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line);
bool cmd_line);
#ifdef CONFIG_DMI #ifdef CONFIG_DMI
void amd_iommu_apply_ivrs_quirks(void); void amd_iommu_apply_ivrs_quirks(void);
...@@ -140,9 +141,9 @@ void amd_iommu_apply_ivrs_quirks(void); ...@@ -140,9 +141,9 @@ void amd_iommu_apply_ivrs_quirks(void);
static inline void amd_iommu_apply_ivrs_quirks(void) { } static inline void amd_iommu_apply_ivrs_quirks(void) { }
#endif #endif
extern void amd_iommu_domain_set_pgtable(struct protection_domain *domain, void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
u64 *root, int mode); u64 *root, int mode);
extern struct dev_table_entry *get_dev_table(struct amd_iommu *iommu); struct dev_table_entry *get_dev_table(struct amd_iommu *iommu);
extern u64 amd_iommu_efr; extern u64 amd_iommu_efr;
extern u64 amd_iommu_efr2; extern u64 amd_iommu_efr2;
......
...@@ -84,21 +84,21 @@ ...@@ -84,21 +84,21 @@
/* Extended Feature Bits */ /* Extended Feature Bits */
#define FEATURE_PREFETCH (1ULL<<0) #define FEATURE_PREFETCH BIT_ULL(0)
#define FEATURE_PPR (1ULL<<1) #define FEATURE_PPR BIT_ULL(1)
#define FEATURE_X2APIC (1ULL<<2) #define FEATURE_X2APIC BIT_ULL(2)
#define FEATURE_NX (1ULL<<3) #define FEATURE_NX BIT_ULL(3)
#define FEATURE_GT (1ULL<<4) #define FEATURE_GT BIT_ULL(4)
#define FEATURE_IA (1ULL<<6) #define FEATURE_IA BIT_ULL(6)
#define FEATURE_GA (1ULL<<7) #define FEATURE_GA BIT_ULL(7)
#define FEATURE_HE (1ULL<<8) #define FEATURE_HE BIT_ULL(8)
#define FEATURE_PC (1ULL<<9) #define FEATURE_PC BIT_ULL(9)
#define FEATURE_GATS_SHIFT (12) #define FEATURE_GATS_SHIFT (12)
#define FEATURE_GATS_MASK (3ULL) #define FEATURE_GATS_MASK (3ULL)
#define FEATURE_GAM_VAPIC (1ULL<<21) #define FEATURE_GAM_VAPIC BIT_ULL(21)
#define FEATURE_GIOSUP (1ULL<<48) #define FEATURE_GIOSUP BIT_ULL(48)
#define FEATURE_EPHSUP (1ULL<<50) #define FEATURE_EPHSUP BIT_ULL(50)
#define FEATURE_SNP (1ULL<<63) #define FEATURE_SNP BIT_ULL(63)
#define FEATURE_PASID_SHIFT 32 #define FEATURE_PASID_SHIFT 32
#define FEATURE_PASID_MASK (0x1fULL << FEATURE_PASID_SHIFT) #define FEATURE_PASID_MASK (0x1fULL << FEATURE_PASID_SHIFT)
...@@ -120,13 +120,13 @@ ...@@ -120,13 +120,13 @@
#define PASID_MASK 0x0000ffff #define PASID_MASK 0x0000ffff
/* MMIO status bits */ /* MMIO status bits */
#define MMIO_STATUS_EVT_OVERFLOW_INT_MASK (1 << 0) #define MMIO_STATUS_EVT_OVERFLOW_INT_MASK BIT(0)
#define MMIO_STATUS_EVT_INT_MASK (1 << 1) #define MMIO_STATUS_EVT_INT_MASK BIT(1)
#define MMIO_STATUS_COM_WAIT_INT_MASK (1 << 2) #define MMIO_STATUS_COM_WAIT_INT_MASK BIT(2)
#define MMIO_STATUS_PPR_INT_MASK (1 << 6) #define MMIO_STATUS_PPR_INT_MASK BIT(6)
#define MMIO_STATUS_GALOG_RUN_MASK (1 << 8) #define MMIO_STATUS_GALOG_RUN_MASK BIT(8)
#define MMIO_STATUS_GALOG_OVERFLOW_MASK (1 << 9) #define MMIO_STATUS_GALOG_OVERFLOW_MASK BIT(9)
#define MMIO_STATUS_GALOG_INT_MASK (1 << 10) #define MMIO_STATUS_GALOG_INT_MASK BIT(10)
/* event logging constants */ /* event logging constants */
#define EVENT_ENTRY_SIZE 0x10 #define EVENT_ENTRY_SIZE 0x10
...@@ -174,6 +174,7 @@ ...@@ -174,6 +174,7 @@
#define CONTROL_GAINT_EN 29 #define CONTROL_GAINT_EN 29
#define CONTROL_XT_EN 50 #define CONTROL_XT_EN 50
#define CONTROL_INTCAPXT_EN 51 #define CONTROL_INTCAPXT_EN 51
#define CONTROL_IRTCACHEDIS 59
#define CONTROL_SNPAVIC_EN 61 #define CONTROL_SNPAVIC_EN 61
#define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT) #define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT)
...@@ -283,7 +284,7 @@ ...@@ -283,7 +284,7 @@
#define AMD_IOMMU_PGSIZES_V2 (PAGE_SIZE | (1ULL << 21) | (1ULL << 30)) #define AMD_IOMMU_PGSIZES_V2 (PAGE_SIZE | (1ULL << 21) | (1ULL << 30))
/* Bit value definition for dte irq remapping fields*/ /* Bit value definition for dte irq remapping fields*/
#define DTE_IRQ_PHYS_ADDR_MASK (((1ULL << 45)-1) << 6) #define DTE_IRQ_PHYS_ADDR_MASK GENMASK_ULL(51, 6)
#define DTE_IRQ_REMAP_INTCTL_MASK (0x3ULL << 60) #define DTE_IRQ_REMAP_INTCTL_MASK (0x3ULL << 60)
#define DTE_IRQ_REMAP_INTCTL (2ULL << 60) #define DTE_IRQ_REMAP_INTCTL (2ULL << 60)
#define DTE_IRQ_REMAP_ENABLE 1ULL #define DTE_IRQ_REMAP_ENABLE 1ULL
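A quick check on why the DTE_IRQ_PHYS_ADDR_MASK change above is a fix rather than a purely cosmetic conversion:

    old:  (((1ULL << 45) - 1) << 6)   -> 45 set bits, covering bits [50:6]
    new:  GENMASK_ULL(51, 6)          -> 46 set bits, covering bits [51:6]

so the old definition silently truncated bit 51 of the interrupt table root pointer held in the DTE.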
...@@ -369,23 +370,23 @@ ...@@ -369,23 +370,23 @@
/* /*
* Bit value definition for I/O PTE fields * Bit value definition for I/O PTE fields
*/ */
#define IOMMU_PTE_PR (1ULL << 0) #define IOMMU_PTE_PR BIT_ULL(0)
#define IOMMU_PTE_U (1ULL << 59) #define IOMMU_PTE_U BIT_ULL(59)
#define IOMMU_PTE_FC (1ULL << 60) #define IOMMU_PTE_FC BIT_ULL(60)
#define IOMMU_PTE_IR (1ULL << 61) #define IOMMU_PTE_IR BIT_ULL(61)
#define IOMMU_PTE_IW (1ULL << 62) #define IOMMU_PTE_IW BIT_ULL(62)
/* /*
* Bit value definition for DTE fields * Bit value definition for DTE fields
*/ */
#define DTE_FLAG_V (1ULL << 0) #define DTE_FLAG_V BIT_ULL(0)
#define DTE_FLAG_TV (1ULL << 1) #define DTE_FLAG_TV BIT_ULL(1)
#define DTE_FLAG_IR (1ULL << 61) #define DTE_FLAG_IR BIT_ULL(61)
#define DTE_FLAG_IW (1ULL << 62) #define DTE_FLAG_IW BIT_ULL(62)
#define DTE_FLAG_IOTLB (1ULL << 32) #define DTE_FLAG_IOTLB BIT_ULL(32)
#define DTE_FLAG_GIOV (1ULL << 54) #define DTE_FLAG_GIOV BIT_ULL(54)
#define DTE_FLAG_GV (1ULL << 55) #define DTE_FLAG_GV BIT_ULL(55)
#define DTE_FLAG_MASK (0x3ffULL << 32) #define DTE_FLAG_MASK (0x3ffULL << 32)
#define DTE_GLX_SHIFT (56) #define DTE_GLX_SHIFT (56)
#define DTE_GLX_MASK (3) #define DTE_GLX_MASK (3)
...@@ -439,13 +440,13 @@ ...@@ -439,13 +440,13 @@
#define MAX_DOMAIN_ID 65536 #define MAX_DOMAIN_ID 65536
/* Protection domain flags */ /* Protection domain flags */
#define PD_DMA_OPS_MASK (1UL << 0) /* domain used for dma_ops */ #define PD_DMA_OPS_MASK BIT(0) /* domain used for dma_ops */
#define PD_DEFAULT_MASK (1UL << 1) /* domain is a default dma_ops #define PD_DEFAULT_MASK BIT(1) /* domain is a default dma_ops
domain for an IOMMU */ domain for an IOMMU */
#define PD_PASSTHROUGH_MASK (1UL << 2) /* domain has no page #define PD_PASSTHROUGH_MASK BIT(2) /* domain has no page
translation */ translation */
#define PD_IOMMUV2_MASK (1UL << 3) /* domain has gcr3 table */ #define PD_IOMMUV2_MASK BIT(3) /* domain has gcr3 table */
#define PD_GIOV_MASK (1UL << 4) /* domain enable GIOV support */ #define PD_GIOV_MASK BIT(4) /* domain enable GIOV support */
extern bool amd_iommu_dump; extern bool amd_iommu_dump;
#define DUMP_printk(format, arg...) \ #define DUMP_printk(format, arg...) \
...@@ -716,6 +717,9 @@ struct amd_iommu { ...@@ -716,6 +717,9 @@ struct amd_iommu {
/* if one, we need to send a completion wait command */ /* if one, we need to send a completion wait command */
bool need_sync; bool need_sync;
/* true if IRTE caching is disabled */
bool irtcachedis_enabled;
/* Handle for IOMMU core code */ /* Handle for IOMMU core code */
struct iommu_device iommu; struct iommu_device iommu;
...@@ -748,7 +752,7 @@ struct amd_iommu { ...@@ -748,7 +752,7 @@ struct amd_iommu {
u32 flags; u32 flags;
volatile u64 *cmd_sem; volatile u64 *cmd_sem;
u64 cmd_sem_val; atomic64_t cmd_sem_val;
#ifdef CONFIG_AMD_IOMMU_DEBUGFS #ifdef CONFIG_AMD_IOMMU_DEBUGFS
/* DebugFS Info */ /* DebugFS Info */
...@@ -882,7 +886,7 @@ extern int amd_iommu_max_glx_val; ...@@ -882,7 +886,7 @@ extern int amd_iommu_max_glx_val;
* This function flushes all internal caches of * This function flushes all internal caches of
* the IOMMU used by this driver. * the IOMMU used by this driver.
*/ */
extern void iommu_flush_all_caches(struct amd_iommu *iommu); void iommu_flush_all_caches(struct amd_iommu *iommu);
static inline int get_ioapic_devid(int id) static inline int get_ioapic_devid(int id)
{ {
...@@ -1006,7 +1010,6 @@ struct amd_ir_data { ...@@ -1006,7 +1010,6 @@ struct amd_ir_data {
struct irq_2_irte irq_2_irte; struct irq_2_irte irq_2_irte;
struct msi_msg msi_entry; struct msi_msg msi_entry;
void *entry; /* Pointer to union irte or struct irte_ga */ void *entry; /* Pointer to union irte or struct irte_ga */
void *ref; /* Pointer to the actual irte */
/** /**
* Store information for activate/de-activate * Store information for activate/de-activate
......
...@@ -162,6 +162,7 @@ static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE; ...@@ -162,6 +162,7 @@ static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
static bool amd_iommu_detected; static bool amd_iommu_detected;
static bool amd_iommu_disabled __initdata; static bool amd_iommu_disabled __initdata;
static bool amd_iommu_force_enable __initdata; static bool amd_iommu_force_enable __initdata;
static bool amd_iommu_irtcachedis;
static int amd_iommu_target_ivhd_type; static int amd_iommu_target_ivhd_type;
/* Global EFR and EFR2 registers */ /* Global EFR and EFR2 registers */
...@@ -484,6 +485,9 @@ static void iommu_disable(struct amd_iommu *iommu) ...@@ -484,6 +485,9 @@ static void iommu_disable(struct amd_iommu *iommu)
/* Disable IOMMU hardware itself */ /* Disable IOMMU hardware itself */
iommu_feature_disable(iommu, CONTROL_IOMMU_EN); iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
/* Clear IRTE cache disabling bit */
iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS);
} }
/* /*
...@@ -1753,7 +1757,7 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h, ...@@ -1753,7 +1757,7 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
iommu->pci_seg = pci_seg; iommu->pci_seg = pci_seg;
raw_spin_lock_init(&iommu->lock); raw_spin_lock_init(&iommu->lock);
iommu->cmd_sem_val = 0; atomic64_set(&iommu->cmd_sem_val, 0);
/* Add IOMMU to internal data structures */ /* Add IOMMU to internal data structures */
list_add_tail(&iommu->list, &amd_iommu_list); list_add_tail(&iommu->list, &amd_iommu_list);
...@@ -2710,6 +2714,33 @@ static void iommu_enable_ga(struct amd_iommu *iommu) ...@@ -2710,6 +2714,33 @@ static void iommu_enable_ga(struct amd_iommu *iommu)
#endif #endif
} }
static void iommu_disable_irtcachedis(struct amd_iommu *iommu)
{
iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS);
}
static void iommu_enable_irtcachedis(struct amd_iommu *iommu)
{
u64 ctrl;
if (!amd_iommu_irtcachedis)
return;
/*
* Note:
* Support for the IRTCacheDis feature is determined by
* checking whether the bit is writable.
*/
iommu_feature_enable(iommu, CONTROL_IRTCACHEDIS);
ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
ctrl &= (1ULL << CONTROL_IRTCACHEDIS);
if (ctrl)
iommu->irtcachedis_enabled = true;
pr_info("iommu%d (%#06x) : IRT cache is %s\n",
iommu->index, iommu->devid,
iommu->irtcachedis_enabled ? "disabled" : "enabled");
}
static void early_enable_iommu(struct amd_iommu *iommu) static void early_enable_iommu(struct amd_iommu *iommu)
{ {
iommu_disable(iommu); iommu_disable(iommu);
...@@ -2720,6 +2751,7 @@ static void early_enable_iommu(struct amd_iommu *iommu) ...@@ -2720,6 +2751,7 @@ static void early_enable_iommu(struct amd_iommu *iommu)
iommu_set_exclusion_range(iommu); iommu_set_exclusion_range(iommu);
iommu_enable_ga(iommu); iommu_enable_ga(iommu);
iommu_enable_xt(iommu); iommu_enable_xt(iommu);
iommu_enable_irtcachedis(iommu);
iommu_enable(iommu); iommu_enable(iommu);
iommu_flush_all_caches(iommu); iommu_flush_all_caches(iommu);
} }
...@@ -2770,10 +2802,12 @@ static void early_enable_iommus(void) ...@@ -2770,10 +2802,12 @@ static void early_enable_iommus(void)
for_each_iommu(iommu) { for_each_iommu(iommu) {
iommu_disable_command_buffer(iommu); iommu_disable_command_buffer(iommu);
iommu_disable_event_buffer(iommu); iommu_disable_event_buffer(iommu);
iommu_disable_irtcachedis(iommu);
iommu_enable_command_buffer(iommu); iommu_enable_command_buffer(iommu);
iommu_enable_event_buffer(iommu); iommu_enable_event_buffer(iommu);
iommu_enable_ga(iommu); iommu_enable_ga(iommu);
iommu_enable_xt(iommu); iommu_enable_xt(iommu);
iommu_enable_irtcachedis(iommu);
iommu_set_device_table(iommu); iommu_set_device_table(iommu);
iommu_flush_all_caches(iommu); iommu_flush_all_caches(iommu);
} }
...@@ -3426,6 +3460,8 @@ static int __init parse_amd_iommu_options(char *str) ...@@ -3426,6 +3460,8 @@ static int __init parse_amd_iommu_options(char *str)
amd_iommu_pgtable = AMD_IOMMU_V1; amd_iommu_pgtable = AMD_IOMMU_V1;
} else if (strncmp(str, "pgtbl_v2", 8) == 0) { } else if (strncmp(str, "pgtbl_v2", 8) == 0) {
amd_iommu_pgtable = AMD_IOMMU_V2; amd_iommu_pgtable = AMD_IOMMU_V2;
} else if (strncmp(str, "irtcachedis", 11) == 0) {
amd_iommu_irtcachedis = true;
} else { } else {
pr_notice("Unknown option - '%s'\n", str); pr_notice("Unknown option - '%s'\n", str);
} }
......
...@@ -310,8 +310,8 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable, ...@@ -310,8 +310,8 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
return NULL; return NULL;
/* Large PTE */ /* Large PTE */
if (PM_PTE_LEVEL(*pte) == 7 || if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL ||
PM_PTE_LEVEL(*pte) == 0) PM_PTE_LEVEL(*pte) == PAGE_MODE_NONE)
break; break;
/* No level skipping support yet */ /* No level skipping support yet */
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
/* /*
* CPU-agnostic AMD IO page table v2 allocator. * CPU-agnostic AMD IO page table v2 allocator.
* *
* Copyright (C) 2022 Advanced Micro Devices, Inc. * Copyright (C) 2022, 2023 Advanced Micro Devices, Inc.
* Author: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> * Author: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
* Author: Vasant Hegde <vasant.hegde@amd.com> * Author: Vasant Hegde <vasant.hegde@amd.com>
*/ */
......
...@@ -1182,11 +1182,11 @@ static int iommu_completion_wait(struct amd_iommu *iommu) ...@@ -1182,11 +1182,11 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
if (!iommu->need_sync) if (!iommu->need_sync)
return 0; return 0;
raw_spin_lock_irqsave(&iommu->lock, flags); data = atomic64_add_return(1, &iommu->cmd_sem_val);
data = ++iommu->cmd_sem_val;
build_completion_wait(&cmd, iommu, data); build_completion_wait(&cmd, iommu, data);
raw_spin_lock_irqsave(&iommu->lock, flags);
ret = __iommu_queue_command_sync(iommu, &cmd, false); ret = __iommu_queue_command_sync(iommu, &cmd, false);
if (ret) if (ret)
goto out_unlock; goto out_unlock;
...@@ -1273,6 +1273,9 @@ static void amd_iommu_flush_irt_all(struct amd_iommu *iommu) ...@@ -1273,6 +1273,9 @@ static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
u32 devid; u32 devid;
u16 last_bdf = iommu->pci_seg->last_bdf; u16 last_bdf = iommu->pci_seg->last_bdf;
if (iommu->irtcachedis_enabled)
return;
for (devid = 0; devid <= last_bdf; devid++) for (devid = 0; devid <= last_bdf; devid++)
iommu_flush_irt(iommu, devid); iommu_flush_irt(iommu, devid);
...@@ -2313,6 +2316,8 @@ static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap) ...@@ -2313,6 +2316,8 @@ static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap)
return amdr_ivrs_remap_support; return amdr_ivrs_remap_support;
case IOMMU_CAP_ENFORCE_CACHE_COHERENCY: case IOMMU_CAP_ENFORCE_CACHE_COHERENCY:
return true; return true;
case IOMMU_CAP_DEFERRED_FLUSH:
return true;
default: default:
break; break;
} }
...@@ -2822,6 +2827,32 @@ EXPORT_SYMBOL(amd_iommu_device_info); ...@@ -2822,6 +2827,32 @@ EXPORT_SYMBOL(amd_iommu_device_info);
static struct irq_chip amd_ir_chip; static struct irq_chip amd_ir_chip;
static DEFINE_SPINLOCK(iommu_table_lock); static DEFINE_SPINLOCK(iommu_table_lock);
static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
{
int ret;
u64 data;
unsigned long flags;
struct iommu_cmd cmd, cmd2;
if (iommu->irtcachedis_enabled)
return;
build_inv_irt(&cmd, devid);
data = atomic64_add_return(1, &iommu->cmd_sem_val);
build_completion_wait(&cmd2, iommu, data);
raw_spin_lock_irqsave(&iommu->lock, flags);
ret = __iommu_queue_command_sync(iommu, &cmd, true);
if (ret)
goto out;
ret = __iommu_queue_command_sync(iommu, &cmd2, false);
if (ret)
goto out;
wait_on_sem(iommu, data);
out:
raw_spin_unlock_irqrestore(&iommu->lock, flags);
}
static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid, static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
struct irq_remap_table *table) struct irq_remap_table *table)
{ {
...@@ -3021,7 +3052,7 @@ static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count, ...@@ -3021,7 +3052,7 @@ static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count,
} }
static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index, static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
struct irte_ga *irte, struct amd_ir_data *data) struct irte_ga *irte)
{ {
struct irq_remap_table *table; struct irq_remap_table *table;
struct irte_ga *entry; struct irte_ga *entry;
...@@ -3046,13 +3077,9 @@ static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index, ...@@ -3046,13 +3077,9 @@ static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
old = entry->irte; old = entry->irte;
WARN_ON(!try_cmpxchg128(&entry->irte, &old, irte->irte)); WARN_ON(!try_cmpxchg128(&entry->irte, &old, irte->irte));
if (data)
data->ref = entry;
raw_spin_unlock_irqrestore(&table->lock, flags); raw_spin_unlock_irqrestore(&table->lock, flags);
iommu_flush_irt(iommu, devid); iommu_flush_irt_and_complete(iommu, devid);
iommu_completion_wait(iommu);
return 0; return 0;
} }
...@@ -3071,8 +3098,7 @@ static int modify_irte(struct amd_iommu *iommu, ...@@ -3071,8 +3098,7 @@ static int modify_irte(struct amd_iommu *iommu,
table->table[index] = irte->val; table->table[index] = irte->val;
raw_spin_unlock_irqrestore(&table->lock, flags); raw_spin_unlock_irqrestore(&table->lock, flags);
iommu_flush_irt(iommu, devid); iommu_flush_irt_and_complete(iommu, devid);
iommu_completion_wait(iommu);
return 0; return 0;
} }
...@@ -3090,8 +3116,7 @@ static void free_irte(struct amd_iommu *iommu, u16 devid, int index) ...@@ -3090,8 +3116,7 @@ static void free_irte(struct amd_iommu *iommu, u16 devid, int index)
iommu->irte_ops->clear_allocated(table, index); iommu->irte_ops->clear_allocated(table, index);
raw_spin_unlock_irqrestore(&table->lock, flags); raw_spin_unlock_irqrestore(&table->lock, flags);
iommu_flush_irt(iommu, devid); iommu_flush_irt_and_complete(iommu, devid);
iommu_completion_wait(iommu);
} }
static void irte_prepare(void *entry, static void irte_prepare(void *entry,
...@@ -3137,7 +3162,7 @@ static void irte_ga_activate(struct amd_iommu *iommu, void *entry, u16 devid, u1 ...@@ -3137,7 +3162,7 @@ static void irte_ga_activate(struct amd_iommu *iommu, void *entry, u16 devid, u1
struct irte_ga *irte = (struct irte_ga *) entry; struct irte_ga *irte = (struct irte_ga *) entry;
irte->lo.fields_remap.valid = 1; irte->lo.fields_remap.valid = 1;
modify_irte_ga(iommu, devid, index, irte, NULL); modify_irte_ga(iommu, devid, index, irte);
} }
static void irte_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) static void irte_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
...@@ -3153,7 +3178,7 @@ static void irte_ga_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, ...@@ -3153,7 +3178,7 @@ static void irte_ga_deactivate(struct amd_iommu *iommu, void *entry, u16 devid,
struct irte_ga *irte = (struct irte_ga *) entry; struct irte_ga *irte = (struct irte_ga *) entry;
irte->lo.fields_remap.valid = 0; irte->lo.fields_remap.valid = 0;
modify_irte_ga(iommu, devid, index, irte, NULL); modify_irte_ga(iommu, devid, index, irte);
} }
static void irte_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index, static void irte_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
...@@ -3177,7 +3202,7 @@ static void irte_ga_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid ...@@ -3177,7 +3202,7 @@ static void irte_ga_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid
APICID_TO_IRTE_DEST_LO(dest_apicid); APICID_TO_IRTE_DEST_LO(dest_apicid);
irte->hi.fields.destination = irte->hi.fields.destination =
APICID_TO_IRTE_DEST_HI(dest_apicid); APICID_TO_IRTE_DEST_HI(dest_apicid);
modify_irte_ga(iommu, devid, index, irte, NULL); modify_irte_ga(iommu, devid, index, irte);
} }
} }
...@@ -3527,7 +3552,7 @@ int amd_iommu_activate_guest_mode(void *data) ...@@ -3527,7 +3552,7 @@ int amd_iommu_activate_guest_mode(void *data)
entry->lo.fields_vapic.ga_tag = ir_data->ga_tag; entry->lo.fields_vapic.ga_tag = ir_data->ga_tag;
return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
ir_data->irq_2_irte.index, entry, ir_data); ir_data->irq_2_irte.index, entry);
} }
EXPORT_SYMBOL(amd_iommu_activate_guest_mode); EXPORT_SYMBOL(amd_iommu_activate_guest_mode);
...@@ -3557,7 +3582,7 @@ int amd_iommu_deactivate_guest_mode(void *data) ...@@ -3557,7 +3582,7 @@ int amd_iommu_deactivate_guest_mode(void *data)
APICID_TO_IRTE_DEST_HI(cfg->dest_apicid); APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
ir_data->irq_2_irte.index, entry, ir_data); ir_data->irq_2_irte.index, entry);
} }
EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode); EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
...@@ -3719,44 +3744,26 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu) ...@@ -3719,44 +3744,26 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
int amd_iommu_update_ga(int cpu, bool is_run, void *data) int amd_iommu_update_ga(int cpu, bool is_run, void *data)
{ {
unsigned long flags;
struct amd_iommu *iommu;
struct irq_remap_table *table;
struct amd_ir_data *ir_data = (struct amd_ir_data *)data; struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
int devid = ir_data->irq_2_irte.devid;
struct irte_ga *entry = (struct irte_ga *) ir_data->entry; struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
struct irte_ga *ref = (struct irte_ga *) ir_data->ref;
if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
!ref || !entry || !entry->lo.fields_vapic.guest_mode) !entry || !entry->lo.fields_vapic.guest_mode)
return 0; return 0;
iommu = ir_data->iommu; if (!ir_data->iommu)
if (!iommu)
return -ENODEV; return -ENODEV;
table = get_irq_table(iommu, devid); if (cpu >= 0) {
if (!table) entry->lo.fields_vapic.destination =
return -ENODEV; APICID_TO_IRTE_DEST_LO(cpu);
entry->hi.fields.destination =
raw_spin_lock_irqsave(&table->lock, flags); APICID_TO_IRTE_DEST_HI(cpu);
if (ref->lo.fields_vapic.guest_mode) {
if (cpu >= 0) {
ref->lo.fields_vapic.destination =
APICID_TO_IRTE_DEST_LO(cpu);
ref->hi.fields.destination =
APICID_TO_IRTE_DEST_HI(cpu);
}
ref->lo.fields_vapic.is_run = is_run;
barrier();
} }
entry->lo.fields_vapic.is_run = is_run;
raw_spin_unlock_irqrestore(&table->lock, flags); return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
ir_data->irq_2_irte.index, entry);
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);
return 0;
} }
EXPORT_SYMBOL(amd_iommu_update_ga); EXPORT_SYMBOL(amd_iommu_update_ga);
#endif #endif
...@@ -894,6 +894,12 @@ static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu, ...@@ -894,6 +894,12 @@ static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
{ {
int index; int index;
if (cmds->num == CMDQ_BATCH_ENTRIES - 1 &&
(smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC)) {
arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
cmds->num = 0;
}
if (cmds->num == CMDQ_BATCH_ENTRIES) { if (cmds->num == CMDQ_BATCH_ENTRIES) {
arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, false); arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, false);
cmds->num = 0; cmds->num = 0;
...@@ -1892,8 +1898,13 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd, ...@@ -1892,8 +1898,13 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
/* Convert page size of 12,14,16 (log2) to 1,2,3 */ /* Convert page size of 12,14,16 (log2) to 1,2,3 */
cmd->tlbi.tg = (tg - 10) / 2; cmd->tlbi.tg = (tg - 10) / 2;
/* Determine what level the granule is at */ /*
cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3)); * Determine what level the granule is at. For non-leaf, io-pgtable
* assumes .tlb_flush_walk can invalidate multiple levels at once,
* so ignore the nominal last-level granule and leave TTL=0.
*/
if (cmd->tlbi.leaf)
cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
num_pages = size >> tg; num_pages = size >> tg;
} }
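To make the new level-hint arithmetic concrete, a worked example (granule sizes assumed for illustration): with a 4 KiB translation granule, tg = 12, so a leaf invalidation of 2 MiB blocks (ilog2(granule) = 21) yields ttl = 4 - ((21 - 3) / (12 - 3)) = 2, the level of 2 MiB block entries, while 4 KiB pages (ilog2(granule) = 12) yield ttl = 4 - 1 = 3, the last level. Non-leaf (table-walk) invalidations now leave ttl at 0, i.e. no hint, because io-pgtable may be tearing down several table levels in a single .tlb_flush_walk call.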
...@@ -2008,6 +2019,7 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap) ...@@ -2008,6 +2019,7 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
/* Assume that a coherent TCU implies coherent TBUs */ /* Assume that a coherent TCU implies coherent TBUs */
return master->smmu->features & ARM_SMMU_FEAT_COHERENCY; return master->smmu->features & ARM_SMMU_FEAT_COHERENCY;
case IOMMU_CAP_NOEXEC: case IOMMU_CAP_NOEXEC:
case IOMMU_CAP_DEFERRED_FLUSH:
return true; return true;
default: default:
return false; return false;
...@@ -2023,7 +2035,6 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type) ...@@ -2023,7 +2035,6 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
if (type != IOMMU_DOMAIN_UNMANAGED && if (type != IOMMU_DOMAIN_UNMANAGED &&
type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_DMA &&
type != IOMMU_DOMAIN_DMA_FQ &&
type != IOMMU_DOMAIN_IDENTITY) type != IOMMU_DOMAIN_IDENTITY)
return NULL; return NULL;
...@@ -3429,6 +3440,44 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) ...@@ -3429,6 +3440,44 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
return 0; return 0;
} }
#define IIDR_IMPLEMENTER_ARM 0x43b
#define IIDR_PRODUCTID_ARM_MMU_600 0x483
#define IIDR_PRODUCTID_ARM_MMU_700 0x487
static void arm_smmu_device_iidr_probe(struct arm_smmu_device *smmu)
{
u32 reg;
unsigned int implementer, productid, variant, revision;
reg = readl_relaxed(smmu->base + ARM_SMMU_IIDR);
implementer = FIELD_GET(IIDR_IMPLEMENTER, reg);
productid = FIELD_GET(IIDR_PRODUCTID, reg);
variant = FIELD_GET(IIDR_VARIANT, reg);
revision = FIELD_GET(IIDR_REVISION, reg);
switch (implementer) {
case IIDR_IMPLEMENTER_ARM:
switch (productid) {
case IIDR_PRODUCTID_ARM_MMU_600:
/* Arm erratum 1076982 */
if (variant == 0 && revision <= 2)
smmu->features &= ~ARM_SMMU_FEAT_SEV;
/* Arm erratum 1209401 */
if (variant < 2)
smmu->features &= ~ARM_SMMU_FEAT_NESTING;
break;
case IIDR_PRODUCTID_ARM_MMU_700:
/* Arm erratum 2812531 */
smmu->features &= ~ARM_SMMU_FEAT_BTM;
smmu->options |= ARM_SMMU_OPT_CMDQ_FORCE_SYNC;
/* Arm errata 2268618, 2812531 */
smmu->features &= ~ARM_SMMU_FEAT_NESTING;
break;
}
break;
}
}
static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
{ {
u32 reg; u32 reg;
...@@ -3635,6 +3684,12 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) ...@@ -3635,6 +3684,12 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
smmu->ias = max(smmu->ias, smmu->oas); smmu->ias = max(smmu->ias, smmu->oas);
if ((smmu->features & ARM_SMMU_FEAT_TRANS_S1) &&
(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
smmu->features |= ARM_SMMU_FEAT_NESTING;
arm_smmu_device_iidr_probe(smmu);
if (arm_smmu_sva_supported(smmu)) if (arm_smmu_sva_supported(smmu))
smmu->features |= ARM_SMMU_FEAT_SVA; smmu->features |= ARM_SMMU_FEAT_SVA;
......
...@@ -69,6 +69,12 @@ ...@@ -69,6 +69,12 @@
#define IDR5_VAX GENMASK(11, 10) #define IDR5_VAX GENMASK(11, 10)
#define IDR5_VAX_52_BIT 1 #define IDR5_VAX_52_BIT 1
#define ARM_SMMU_IIDR 0x18
#define IIDR_PRODUCTID GENMASK(31, 20)
#define IIDR_VARIANT GENMASK(19, 16)
#define IIDR_REVISION GENMASK(15, 12)
#define IIDR_IMPLEMENTER GENMASK(11, 0)
#define ARM_SMMU_CR0 0x20 #define ARM_SMMU_CR0 0x20
#define CR0_ATSCHK (1 << 4) #define CR0_ATSCHK (1 << 4)
#define CR0_CMDQEN (1 << 3) #define CR0_CMDQEN (1 << 3)
...@@ -639,11 +645,13 @@ struct arm_smmu_device { ...@@ -639,11 +645,13 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_BTM (1 << 16) #define ARM_SMMU_FEAT_BTM (1 << 16)
#define ARM_SMMU_FEAT_SVA (1 << 17) #define ARM_SMMU_FEAT_SVA (1 << 17)
#define ARM_SMMU_FEAT_E2H (1 << 18) #define ARM_SMMU_FEAT_E2H (1 << 18)
#define ARM_SMMU_FEAT_NESTING (1 << 19)
u32 features; u32 features;
#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0) #define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
#define ARM_SMMU_OPT_PAGE0_REGS_ONLY (1 << 1) #define ARM_SMMU_OPT_PAGE0_REGS_ONLY (1 << 1)
#define ARM_SMMU_OPT_MSIPOLL (1 << 2) #define ARM_SMMU_OPT_MSIPOLL (1 << 2)
#define ARM_SMMU_OPT_CMDQ_FORCE_SYNC (1 << 3)
u32 options; u32 options;
struct arm_smmu_cmdq cmdq; struct arm_smmu_cmdq cmdq;
......
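As a worked decode of the new IIDR gating (the register value is assumed purely for illustration): reg = 0x4830243b splits per the field masks above into implementer = 0x43b (Arm), productid = 0x483 (MMU-600), variant = 0, revision = 2, so such a part both loses SEV-based command-queue wakeup (variant == 0 && revision <= 2, erratum 1076982) and has ARM_SMMU_FEAT_NESTING cleared (variant < 2, erratum 1209401). Any MMU-700 (productid 0x487) additionally loses BTM and gains ARM_SMMU_OPT_CMDQ_FORCE_SYNC, which is what drives the early CMD_SYNC in arm_smmu_cmdq_batch_add().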
...@@ -856,8 +856,7 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type) ...@@ -856,8 +856,7 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
struct arm_smmu_domain *smmu_domain; struct arm_smmu_domain *smmu_domain;
if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_IDENTITY) { if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_IDENTITY) {
if (using_legacy_binding || if (using_legacy_binding || type != IOMMU_DOMAIN_DMA)
(type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_DMA_FQ))
return NULL; return NULL;
} }
/* /*
...@@ -1325,6 +1324,7 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap) ...@@ -1325,6 +1324,7 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK || return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK ||
device_get_dma_attr(dev) == DEV_DMA_COHERENT; device_get_dma_attr(dev) == DEV_DMA_COHERENT;
case IOMMU_CAP_NOEXEC: case IOMMU_CAP_NOEXEC:
case IOMMU_CAP_DEFERRED_FLUSH:
return true; return true;
default: default:
return false; return false;
......
...@@ -615,7 +615,8 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, ...@@ -615,7 +615,8 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
goto done_unlock; goto done_unlock;
/* If the FQ fails we can simply fall back to strict mode */ /* If the FQ fails we can simply fall back to strict mode */
if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain)) if (domain->type == IOMMU_DOMAIN_DMA_FQ &&
(!device_iommu_capable(dev, IOMMU_CAP_DEFERRED_FLUSH) || iommu_dma_init_fq(domain)))
domain->type = IOMMU_DOMAIN_DMA; domain->type = IOMMU_DOMAIN_DMA;
ret = iova_reserve_iommu_regions(dev, domain); ret = iova_reserve_iommu_regions(dev, domain);
......
...@@ -334,17 +334,6 @@ int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu) ...@@ -334,17 +334,6 @@ int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu)
return ret; return ret;
} }
static struct iommu_group *get_device_iommu_group(struct device *dev)
{
struct iommu_group *group;
group = iommu_group_get(dev);
if (!group)
group = iommu_group_alloc();
return group;
}
static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl) static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
{ {
u32 version; u32 version;
...@@ -356,94 +345,52 @@ static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl) ...@@ -356,94 +345,52 @@ static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
return version >= 0x204; return version >= 0x204;
} }
/* Get iommu group information from peer devices or devices on the parent bus */ static struct iommu_group *fsl_pamu_device_group(struct device *dev)
static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
{ {
struct pci_dev *tmp;
struct iommu_group *group; struct iommu_group *group;
struct pci_bus *bus = pdev->bus; struct pci_dev *pdev;
/* /*
* Traverese the pci bus device list to get * For platform devices we allocate a separate group for each of the
* the shared iommu group. * devices.
*/ */
while (bus) { if (!dev_is_pci(dev))
list_for_each_entry(tmp, &bus->devices, bus_list) { return generic_device_group(dev);
if (tmp == pdev)
continue;
group = iommu_group_get(&tmp->dev);
if (group)
return group;
}
bus = bus->parent; /*
} * We can partition PCIe devices so assign device group to the device
*/
return NULL; pdev = to_pci_dev(dev);
} if (check_pci_ctl_endpt_part(pci_bus_to_host(pdev->bus)))
return pci_device_group(&pdev->dev);
static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
{
struct pci_controller *pci_ctl;
bool pci_endpt_partitioning;
struct iommu_group *group = NULL;
pci_ctl = pci_bus_to_host(pdev->bus);
pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
/* We can partition PCIe devices so assign device group to the device */
if (pci_endpt_partitioning) {
group = pci_device_group(&pdev->dev);
/*
* PCIe controller is not a paritionable entity
* free the controller device iommu_group.
*/
if (pci_ctl->parent->iommu_group)
iommu_group_remove_device(pci_ctl->parent);
} else {
/*
* All devices connected to the controller will share the
* PCI controllers device group. If this is the first
* device to be probed for the pci controller, copy the
* device group information from the PCI controller device
* node and remove the PCI controller iommu group.
* For subsequent devices, the iommu group information can
* be obtained from sibling devices (i.e. from the bus_devices
* link list).
*/
if (pci_ctl->parent->iommu_group) {
group = get_device_iommu_group(pci_ctl->parent);
iommu_group_remove_device(pci_ctl->parent);
} else {
group = get_shared_pci_device_group(pdev);
}
}
if (!group)
group = ERR_PTR(-ENODEV);
/*
* All devices connected to the controller will share the same device
* group.
*
* Due to ordering between fsl_pamu_init() and fsl_pci_init() it is
* guaranteed that the pci_ctl->parent platform_device will have the
* iommu driver bound and will already have a group set. So we just
* re-use this group as the group for every device in the hose.
*/
group = iommu_group_get(pci_bus_to_host(pdev->bus)->parent);
if (WARN_ON(!group))
return ERR_PTR(-EINVAL);
return group; return group;
} }
static struct iommu_group *fsl_pamu_device_group(struct device *dev) static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
{ {
struct iommu_group *group = ERR_PTR(-ENODEV);
int len; int len;
/* /*
* For platform devices we allocate a separate group for * uboot must fill the fsl,liodn for platform devices to be supported by
* each of the devices. * the iommu.
*/ */
if (dev_is_pci(dev)) if (!dev_is_pci(dev) &&
group = get_pci_device_group(to_pci_dev(dev)); !of_get_property(dev->of_node, "fsl,liodn", &len))
else if (of_get_property(dev->of_node, "fsl,liodn", &len)) return ERR_PTR(-ENODEV);
group = get_device_iommu_group(dev);
return group;
}
static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
{
return &pamu_iommu; return &pamu_iommu;
} }
......
...@@ -1185,7 +1185,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu) ...@@ -1185,7 +1185,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{ {
struct root_entry *root; struct root_entry *root;
root = (struct root_entry *)alloc_pgtable_page(iommu->node, GFP_ATOMIC); root = alloc_pgtable_page(iommu->node, GFP_ATOMIC);
if (!root) { if (!root) {
pr_err("Allocating root entry for %s failed\n", pr_err("Allocating root entry for %s failed\n",
iommu->name); iommu->name);
...@@ -1312,15 +1312,7 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, ...@@ -1312,15 +1312,7 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
iommu->name, type); iommu->name, type);
return; return;
} }
/* Note: set drain read/write */
#if 0
/*
* This is probably to be super secure.. Looks like we can
* ignore it without any impact.
*/
if (cap_read_drain(iommu->cap))
val |= DMA_TLB_READ_DRAIN;
#endif
if (cap_write_drain(iommu->cap)) if (cap_write_drain(iommu->cap))
val |= DMA_TLB_WRITE_DRAIN; val |= DMA_TLB_WRITE_DRAIN;
...@@ -1897,8 +1889,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain, ...@@ -1897,8 +1889,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
struct context_entry *context; struct context_entry *context;
int ret; int ret;
WARN_ON(did == 0);
if (hw_pass_through && domain_type_is_si(domain)) if (hw_pass_through && domain_type_is_si(domain))
translation = CONTEXT_TT_PASS_THROUGH; translation = CONTEXT_TT_PASS_THROUGH;
...@@ -1944,8 +1934,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain, ...@@ -1944,8 +1934,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
if (sm_supported(iommu)) { if (sm_supported(iommu)) {
unsigned long pds; unsigned long pds;
WARN_ON(!table);
/* Setup the PASID DIR pointer: */ /* Setup the PASID DIR pointer: */
pds = context_get_sm_pds(table); pds = context_get_sm_pds(table);
context->lo = (u64)virt_to_phys(table->table) | context->lo = (u64)virt_to_phys(table->table) |
...@@ -2967,10 +2955,15 @@ static int init_iommu_hw(void) ...@@ -2967,10 +2955,15 @@ static int init_iommu_hw(void)
{ {
struct dmar_drhd_unit *drhd; struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu = NULL; struct intel_iommu *iommu = NULL;
int ret;
for_each_active_iommu(iommu, drhd) for_each_active_iommu(iommu, drhd) {
if (iommu->qi) if (iommu->qi) {
dmar_reenable_qi(iommu); ret = dmar_reenable_qi(iommu);
if (ret)
return ret;
}
}
for_each_iommu(iommu, drhd) { for_each_iommu(iommu, drhd) {
if (drhd->ignored) { if (drhd->ignored) {
...@@ -4064,7 +4057,6 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) ...@@ -4064,7 +4057,6 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
case IOMMU_DOMAIN_BLOCKED: case IOMMU_DOMAIN_BLOCKED:
return &blocking_domain; return &blocking_domain;
case IOMMU_DOMAIN_DMA: case IOMMU_DOMAIN_DMA:
case IOMMU_DOMAIN_DMA_FQ:
case IOMMU_DOMAIN_UNMANAGED: case IOMMU_DOMAIN_UNMANAGED:
dmar_domain = alloc_domain(type); dmar_domain = alloc_domain(type);
if (!dmar_domain) { if (!dmar_domain) {
...@@ -4369,6 +4361,7 @@ static bool intel_iommu_capable(struct device *dev, enum iommu_cap cap) ...@@ -4369,6 +4361,7 @@ static bool intel_iommu_capable(struct device *dev, enum iommu_cap cap)
switch (cap) { switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY: case IOMMU_CAP_CACHE_COHERENCY:
case IOMMU_CAP_DEFERRED_FLUSH:
return true; return true;
case IOMMU_CAP_PRE_BOOT_PROTECTION: case IOMMU_CAP_PRE_BOOT_PROTECTION:
return dmar_platform_optin(); return dmar_platform_optin();
......
...@@ -647,7 +647,13 @@ struct iova_rcache { ...@@ -647,7 +647,13 @@ struct iova_rcache {
static struct iova_magazine *iova_magazine_alloc(gfp_t flags) static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
{ {
return kzalloc(sizeof(struct iova_magazine), flags); struct iova_magazine *mag;
mag = kmalloc(sizeof(*mag), flags);
if (mag)
mag->size = 0;
return mag;
} }
static void iova_magazine_free(struct iova_magazine *mag) static void iova_magazine_free(struct iova_magazine *mag)
......
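For context on why the kzalloc() -> kmalloc() switch above is safe: magazine slots are only ever read below mag->size, so a fresh magazine needs nothing initialised beyond size = 0. A minimal sketch of that invariant, with the struct layout and helper name assumed rather than taken from this patch:

struct iova_magazine_sketch {
	unsigned long size;		/* number of valid entries */
	unsigned long pfns[128];	/* only indices [0, size) are ever read */
};

static bool magazine_push(struct iova_magazine_sketch *mag, unsigned long pfn)
{
	if (mag->size == ARRAY_SIZE(mag->pfns))
		return false;		/* full */
	mag->pfns[mag->size++] = pfn;	/* slot is written before it can be read */
	return true;
}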
...@@ -788,6 +788,29 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev) ...@@ -788,6 +788,29 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
return 0; return 0;
} }
static void viommu_detach_dev(struct viommu_endpoint *vdev)
{
int i;
struct virtio_iommu_req_detach req;
struct viommu_domain *vdomain = vdev->vdomain;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(vdev->dev);
if (!vdomain)
return;
req = (struct virtio_iommu_req_detach) {
.head.type = VIRTIO_IOMMU_T_DETACH,
.domain = cpu_to_le32(vdomain->id),
};
for (i = 0; i < fwspec->num_ids; i++) {
req.endpoint = cpu_to_le32(fwspec->ids[i]);
WARN_ON(viommu_send_req_sync(vdev->viommu, &req, sizeof(req)));
}
vdomain->nr_endpoints--;
vdev->vdomain = NULL;
}
static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova, static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount, phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped) int prot, gfp_t gfp, size_t *mapped)
...@@ -810,25 +833,26 @@ static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova, ...@@ -810,25 +833,26 @@ static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
if (ret) if (ret)
return ret; return ret;
map = (struct virtio_iommu_req_map) { if (vdomain->nr_endpoints) {
.head.type = VIRTIO_IOMMU_T_MAP, map = (struct virtio_iommu_req_map) {
.domain = cpu_to_le32(vdomain->id), .head.type = VIRTIO_IOMMU_T_MAP,
.virt_start = cpu_to_le64(iova), .domain = cpu_to_le32(vdomain->id),
.phys_start = cpu_to_le64(paddr), .virt_start = cpu_to_le64(iova),
.virt_end = cpu_to_le64(end), .phys_start = cpu_to_le64(paddr),
.flags = cpu_to_le32(flags), .virt_end = cpu_to_le64(end),
}; .flags = cpu_to_le32(flags),
};
if (!vdomain->nr_endpoints)
return 0;
ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map)); ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
if (ret) if (ret) {
viommu_del_mappings(vdomain, iova, end); viommu_del_mappings(vdomain, iova, end);
else if (mapped) return ret;
}
}
if (mapped)
*mapped = size; *mapped = size;
return ret; return 0;
} }
static size_t viommu_unmap_pages(struct iommu_domain *domain, unsigned long iova, static size_t viommu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
...@@ -990,6 +1014,7 @@ static void viommu_release_device(struct device *dev) ...@@ -990,6 +1014,7 @@ static void viommu_release_device(struct device *dev)
{ {
struct viommu_endpoint *vdev = dev_iommu_priv_get(dev); struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
viommu_detach_dev(vdev);
iommu_put_resv_regions(dev, &vdev->resv_regions); iommu_put_resv_regions(dev, &vdev->resv_regions);
kfree(vdev); kfree(vdev);
} }
......
...@@ -65,6 +65,7 @@ struct iommu_domain_geometry { ...@@ -65,6 +65,7 @@ struct iommu_domain_geometry {
#define __IOMMU_DOMAIN_SVA (1U << 4) /* Shared process address space */ #define __IOMMU_DOMAIN_SVA (1U << 4) /* Shared process address space */
#define IOMMU_DOMAIN_ALLOC_FLAGS ~__IOMMU_DOMAIN_DMA_FQ
/* /*
* This are the possible domain-types * This are the possible domain-types
* *
...@@ -127,6 +128,11 @@ enum iommu_cap { ...@@ -127,6 +128,11 @@ enum iommu_cap {
* this device. * this device.
*/ */
IOMMU_CAP_ENFORCE_CACHE_COHERENCY, IOMMU_CAP_ENFORCE_CACHE_COHERENCY,
/*
* IOMMU driver does not issue TLB maintenance during .unmap, so can
* usefully support the non-strict DMA flush queue.
*/
IOMMU_CAP_DEFERRED_FLUSH,
}; };
/* These are the possible reserved region types */ /* These are the possible reserved region types */
......
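Tying the new capability together: a driver opts its devices into the deferred-flush path simply by reporting IOMMU_CAP_DEFERRED_FLUSH from its .capable() callback, as the AMD, Intel and Arm SMMU drivers do in the hunks above; iommu_dma_init_domain() keeps IOMMU_DOMAIN_DMA_FQ only when device_iommu_capable() confirms the capability and otherwise falls back to strict IOMMU_DOMAIN_DMA. A minimal sketch for a hypothetical driver (mydrv_capable is illustrative, not part of this patch):

static bool mydrv_capable(struct device *dev, enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_DEFERRED_FLUSH:
		/* Only valid if the driver really defers TLB maintenance out of .unmap */
		return true;
	default:
		return false;
	}
}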