Commit 5d308fc1 authored by Lu Baolu, committed by Joerg Roedel

iommu/vt-d: Add 256-bit invalidation descriptor support

Intel VT-d spec rev 3.0 requires software to use 256-bit
descriptors in the invalidation queue. As the spec reads in
section 6.5.2:

Remapping hardware supporting Scalable Mode Translations
(ECAP_REG.SMTS=1) allow software to additionally program
the width of the descriptors (128-bits or 256-bits) that
will be written into the Queue. Software should setup the
Invalidation Queue for 256-bit descriptors before
programming remapping hardware for scalable-mode
translation as 128-bit descriptors are treated as invalid
descriptors (see Table 21 in Section 6.5.2.10) in
scalable-mode.
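
In numbers: 256 queue entries of 32 bytes each take 8KB, i.e.
two 4KB pages, which is why the IQA_REG programming and the
queue page allocation change together in this patch. Below is a
minimal user-space sketch of the IQA_REG value computed in
__dmar_enable_qi(); bit positions follow this patch, and "smts"
and the base address are stand-in inputs, not real register
reads:

    #include <stdint.h>
    #include <stdio.h>

    /* DW (bit 11) selects 256-bit descriptors; QS (bits 2:0)
     * sizes the queue as 2^QS 4KB pages, so QS=1 covers the two
     * pages needed for 256 x 32-byte descriptors. */
    static uint64_t iqa_reg_value(uint64_t queue_base_phys, int smts)
    {
        uint64_t val = queue_base_phys;

        if (smts)
            val |= (1 << 11) | 1;   /* DW=1, QS=1 */
        return val;
    }

    int main(void)
    {
        printf("IQA = 0x%llx\n",
               (unsigned long long)iqa_reg_value(0x100000, 1));
        return 0;
    }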

This patch adds 256-bit invalidation descriptor support
when the remapping hardware advertises the scalable mode
capability.
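
The width change also turns queue indexing from qi_desc array
arithmetic into byte offsets scaled by a per-IOMMU shift. A
hedged stand-alone sketch of that indexing scheme follows;
qi_write() and the "smts" flag are illustrative stand-ins for
the driver's memcpy into qi->desc:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define QI_LENGTH 256                   /* queue entries, as in the driver */

    struct qi_desc {
        uint64_t qw0, qw1, qw2, qw3;        /* 256 bits in scalable mode */
    };

    /* models qi_shift(iommu): DMAR_IQ_SHIFT (4, i.e. 16-byte
     * descriptors) plus one more bit when scalable mode is set */
    static int qi_shift(int smts)
    {
        return 4 + !!smts;
    }

    static void qi_write(void *queue, int index, int smts,
                         const struct qi_desc *desc)
    {
        int shift = qi_shift(smts);

        /* copy only as many bytes as the hardware will fetch */
        memcpy((char *)queue + ((size_t)index << shift), desc,
               (size_t)1 << shift);
    }

    int main(void)
    {
        static char queue[QI_LENGTH << 5];      /* fits either width */
        struct qi_desc desc = { .qw0 = 0x4 };   /* type field, bits 3:0 */

        qi_write(queue, 3, 1, &desc);
        printf("entry 3 starts at byte %d\n", 3 << qi_shift(1));
        return 0;
    }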

Cc: Ashok Raj <ashok.raj@intel.com>
Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
Cc: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Sanjay Kumar <sanjay.k.kumar@intel.com>
Signed-off-by: Liu Yi L <yi.l.liu@intel.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 4f2ed183
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1160,6 +1160,7 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
 	int head, tail;
 	struct q_inval *qi = iommu->qi;
 	int wait_index = (index + 1) % QI_LENGTH;
+	int shift = qi_shift(iommu);
 
 	if (qi->desc_status[wait_index] == QI_ABORT)
 		return -EAGAIN;
@@ -1173,13 +1174,19 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
 	 */
 	if (fault & DMA_FSTS_IQE) {
 		head = readl(iommu->reg + DMAR_IQH_REG);
-		if ((head >> DMAR_IQ_SHIFT) == index) {
-			pr_err("VT-d detected invalid descriptor: "
-				"low=%llx, high=%llx\n",
-				(unsigned long long)qi->desc[index].low,
-				(unsigned long long)qi->desc[index].high);
-			memcpy(&qi->desc[index], &qi->desc[wait_index],
-					sizeof(struct qi_desc));
+		if ((head >> shift) == index) {
+			struct qi_desc *desc = qi->desc + head;
+
+			/*
+			 * desc->qw2 and desc->qw3 are either reserved or
+			 * used by software as private data. We won't print
+			 * out these two qw's for security consideration.
+			 */
+			pr_err("VT-d detected invalid descriptor: qw0 = %llx, qw1 = %llx\n",
+			       (unsigned long long)desc->qw0,
+			       (unsigned long long)desc->qw1);
+			memcpy(desc, qi->desc + (wait_index << shift),
+			       1 << shift);
 			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
 			return -EINVAL;
 		}
@@ -1191,10 +1198,10 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
 	 */
 	if (fault & DMA_FSTS_ITE) {
 		head = readl(iommu->reg + DMAR_IQH_REG);
-		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
+		head = ((head >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
 		head |= 1;
 		tail = readl(iommu->reg + DMAR_IQT_REG);
-		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
+		tail = ((tail >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
 
 		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
@@ -1222,15 +1229,14 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 {
 	int rc;
 	struct q_inval *qi = iommu->qi;
-	struct qi_desc *hw, wait_desc;
+	int offset, shift, length;
+	struct qi_desc wait_desc;
 	int wait_index, index;
 	unsigned long flags;
 
 	if (!qi)
 		return 0;
 
-	hw = qi->desc;
-
 restart:
 	rc = 0;
@@ -1243,16 +1249,21 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 	index = qi->free_head;
 	wait_index = (index + 1) % QI_LENGTH;
+	shift = qi_shift(iommu);
+	length = 1 << shift;
 
 	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
 
-	hw[index] = *desc;
-
-	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
+	offset = index << shift;
+	memcpy(qi->desc + offset, desc, length);
+	wait_desc.qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
 			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
-	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
-
-	hw[wait_index] = wait_desc;
-
+	wait_desc.qw1 = virt_to_phys(&qi->desc_status[wait_index]);
+	wait_desc.qw2 = 0;
+	wait_desc.qw3 = 0;
+	offset = wait_index << shift;
+	memcpy(qi->desc + offset, &wait_desc, length);
 	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
 	qi->free_cnt -= 2;
@@ -1261,7 +1272,7 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 	 * update the HW tail register indicating the presence of
 	 * new descriptors.
 	 */
-	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
+	writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);
 
 	while (qi->desc_status[wait_index] != QI_DONE) {
 		/*
@@ -1298,8 +1309,10 @@ void qi_global_iec(struct intel_iommu *iommu)
 {
 	struct qi_desc desc;
 
-	desc.low = QI_IEC_TYPE;
-	desc.high = 0;
+	desc.qw0 = QI_IEC_TYPE;
+	desc.qw1 = 0;
+	desc.qw2 = 0;
+	desc.qw3 = 0;
 
 	/* should never fail */
 	qi_submit_sync(&desc, iommu);
@@ -1310,9 +1323,11 @@ void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
 {
 	struct qi_desc desc;
 
-	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
-			| QI_CC_GRAN(type) | QI_CC_TYPE;
-	desc.high = 0;
+	desc.qw0 = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
+			| QI_CC_GRAN(type) | QI_CC_TYPE;
+	desc.qw1 = 0;
+	desc.qw2 = 0;
+	desc.qw3 = 0;
 
 	qi_submit_sync(&desc, iommu);
 }
@@ -1331,10 +1346,12 @@ void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 	if (cap_read_drain(iommu->cap))
 		dr = 1;
 
-	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
-		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
-	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
-		| QI_IOTLB_AM(size_order);
+	desc.qw0 = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
+		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
+	desc.qw1 = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
+		| QI_IOTLB_AM(size_order);
+	desc.qw2 = 0;
+	desc.qw3 = 0;
 
 	qi_submit_sync(&desc, iommu);
 }
@@ -1347,15 +1364,17 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
 	if (mask) {
 		WARN_ON_ONCE(addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1));
 		addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
-		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
+		desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
 	} else
-		desc.high = QI_DEV_IOTLB_ADDR(addr);
+		desc.qw1 = QI_DEV_IOTLB_ADDR(addr);
 
 	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
 		qdep = 0;
 
-	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
-		   QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
+	desc.qw0 = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
+		   QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
+	desc.qw2 = 0;
+	desc.qw3 = 0;
 
 	qi_submit_sync(&desc, iommu);
 }
@@ -1403,16 +1422,24 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
 	u32 sts;
 	unsigned long flags;
 	struct q_inval *qi = iommu->qi;
+	u64 val = virt_to_phys(qi->desc);
 
 	qi->free_head = qi->free_tail = 0;
 	qi->free_cnt = QI_LENGTH;
 
+	/*
+	 * Set DW=1 and QS=1 in IQA_REG when Scalable Mode capability
+	 * is present.
+	 */
+	if (ecap_smts(iommu->ecap))
+		val |= (1 << 11) | 1;
+
 	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
 	/* write zero to the tail reg */
 	writel(0, iommu->reg + DMAR_IQT_REG);
 
-	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
+	dmar_writeq(iommu->reg + DMAR_IQA_REG, val);
 
 	iommu->gcmd |= DMA_GCMD_QIE;
 	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
@@ -1448,8 +1475,12 @@ int dmar_enable_qi(struct intel_iommu *iommu)
 
 	qi = iommu->qi;
 
-
-	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
+	/*
+	 * Need two pages to accommodate 256 descriptors of 256 bits each
+	 * if the remapping hardware supports scalable mode translation.
+	 */
+	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
+				     !!ecap_smts(iommu->ecap));
 	if (!desc_page) {
 		kfree(qi);
 		iommu->qi = NULL;
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -161,27 +161,40 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
 		 * because that's the only option the hardware gives us. Despite
 		 * the fact that they are actually only accessible through one. */
 		if (gl)
-			desc.low = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_DID(sdev->did) |
-				QI_EIOTLB_GRAN(QI_GRAN_ALL_ALL) | QI_EIOTLB_TYPE;
+			desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
+					QI_EIOTLB_DID(sdev->did) |
+					QI_EIOTLB_GRAN(QI_GRAN_ALL_ALL) |
+					QI_EIOTLB_TYPE;
 		else
-			desc.low = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_DID(sdev->did) |
-				QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | QI_EIOTLB_TYPE;
-		desc.high = 0;
+			desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
+					QI_EIOTLB_DID(sdev->did) |
+					QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
+					QI_EIOTLB_TYPE;
+		desc.qw1 = 0;
 	} else {
 		int mask = ilog2(__roundup_pow_of_two(pages));
 
-		desc.low = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_DID(sdev->did) |
-			QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) | QI_EIOTLB_TYPE;
-		desc.high = QI_EIOTLB_ADDR(address) | QI_EIOTLB_GL(gl) |
-			QI_EIOTLB_IH(ih) | QI_EIOTLB_AM(mask);
+		desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
+				QI_EIOTLB_DID(sdev->did) |
+				QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
+				QI_EIOTLB_TYPE;
+		desc.qw1 = QI_EIOTLB_ADDR(address) |
+				QI_EIOTLB_GL(gl) |
+				QI_EIOTLB_IH(ih) |
+				QI_EIOTLB_AM(mask);
 	}
+	desc.qw2 = 0;
+	desc.qw3 = 0;
 	qi_submit_sync(&desc, svm->iommu);
 
 	if (sdev->dev_iotlb) {
-		desc.low = QI_DEV_EIOTLB_PASID(svm->pasid) | QI_DEV_EIOTLB_SID(sdev->sid) |
-			QI_DEV_EIOTLB_QDEP(sdev->qdep) | QI_DEIOTLB_TYPE;
+		desc.qw0 = QI_DEV_EIOTLB_PASID(svm->pasid) |
+				QI_DEV_EIOTLB_SID(sdev->sid) |
+				QI_DEV_EIOTLB_QDEP(sdev->qdep) |
+				QI_DEIOTLB_TYPE;
 		if (pages == -1) {
-			desc.high = QI_DEV_EIOTLB_ADDR(-1ULL >> 1) | QI_DEV_EIOTLB_SIZE;
+			desc.qw1 = QI_DEV_EIOTLB_ADDR(-1ULL >> 1) |
+					QI_DEV_EIOTLB_SIZE;
 		} else if (pages > 1) {
 			/* The least significant zero bit indicates the size. So,
 			 * for example, an "address" value of 0x12345f000 will
@@ -189,10 +202,13 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
 			unsigned long last = address + ((unsigned long)(pages - 1) << VTD_PAGE_SHIFT);
 			unsigned long mask = __rounddown_pow_of_two(address ^ last);
 
-			desc.high = QI_DEV_EIOTLB_ADDR((address & ~mask) | (mask - 1)) | QI_DEV_EIOTLB_SIZE;
+			desc.qw1 = QI_DEV_EIOTLB_ADDR((address & ~mask) |
+					(mask - 1)) | QI_DEV_EIOTLB_SIZE;
 		} else {
-			desc.high = QI_DEV_EIOTLB_ADDR(address);
+			desc.qw1 = QI_DEV_EIOTLB_ADDR(address);
 		}
+		desc.qw2 = 0;
+		desc.qw3 = 0;
 		qi_submit_sync(&desc, svm->iommu);
 	}
 }
@@ -237,8 +253,11 @@ static void intel_flush_pasid_dev(struct intel_svm *svm, struct intel_svm_dev *s
 {
 	struct qi_desc desc;
 
-	desc.high = 0;
-	desc.low = QI_PC_TYPE | QI_PC_DID(sdev->did) | QI_PC_PASID_SEL | QI_PC_PASID(pasid);
-
+	desc.qw0 = QI_PC_TYPE | QI_PC_DID(sdev->did) |
+			QI_PC_PASID_SEL | QI_PC_PASID(pasid);
+	desc.qw1 = 0;
+	desc.qw2 = 0;
+	desc.qw3 = 0;
 	qi_submit_sync(&desc, svm->iommu);
 }
@@ -667,24 +686,27 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 no_pasid:
 		if (req->lpig) {
 			/* Page Group Response */
-			resp.low = QI_PGRP_PASID(req->pasid) |
+			resp.qw0 = QI_PGRP_PASID(req->pasid) |
 				QI_PGRP_DID((req->bus << 8) | req->devfn) |
 				QI_PGRP_PASID_P(req->pasid_present) |
 				QI_PGRP_RESP_TYPE;
-			resp.high = QI_PGRP_IDX(req->prg_index) |
-				QI_PGRP_PRIV(req->private) | QI_PGRP_RESP_CODE(result);
-
-			qi_submit_sync(&resp, iommu);
+			resp.qw1 = QI_PGRP_IDX(req->prg_index) |
+				QI_PGRP_PRIV(req->private) |
+				QI_PGRP_RESP_CODE(result);
 		} else if (req->srr) {
 			/* Page Stream Response */
-			resp.low = QI_PSTRM_IDX(req->prg_index) |
-				QI_PSTRM_PRIV(req->private) | QI_PSTRM_BUS(req->bus) |
-				QI_PSTRM_PASID(req->pasid) | QI_PSTRM_RESP_TYPE;
-			resp.high = QI_PSTRM_ADDR(address) | QI_PSTRM_DEVFN(req->devfn) |
-				QI_PSTRM_RESP_CODE(result);
-
-			qi_submit_sync(&resp, iommu);
+			resp.qw0 = QI_PSTRM_IDX(req->prg_index) |
+				QI_PSTRM_PRIV(req->private) |
+				QI_PSTRM_BUS(req->bus) |
+				QI_PSTRM_PASID(req->pasid) |
+				QI_PSTRM_RESP_TYPE;
+			resp.qw1 = QI_PSTRM_ADDR(address) |
+				QI_PSTRM_DEVFN(req->devfn) |
+				QI_PSTRM_RESP_CODE(result);
 		}
+		resp.qw2 = 0;
+		resp.qw3 = 0;
+		qi_submit_sync(&resp, iommu);
 
 		head = (head + sizeof(*req)) & PRQ_RING_MASK;
 	}
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -145,9 +145,11 @@ static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
 {
 	struct qi_desc desc;
 
-	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
-			| QI_IEC_SELECTIVE;
-	desc.high = 0;
+	desc.qw0 = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
+		| QI_IEC_SELECTIVE;
+	desc.qw1 = 0;
+	desc.qw2 = 0;
+	desc.qw3 = 0;
 
 	return qi_submit_sync(&desc, iommu);
 }
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -401,13 +401,18 @@ enum {
 #define QI_GRAN_NONG_PASID	2
 #define QI_GRAN_PSI_PASID	3
 
+#define qi_shift(iommu)		(DMAR_IQ_SHIFT + !!ecap_smts((iommu)->ecap))
+
 struct qi_desc {
-	u64 low, high;
+	u64 qw0;
+	u64 qw1;
+	u64 qw2;
+	u64 qw3;
 };
 
 struct q_inval {
 	raw_spinlock_t	q_lock;
-	struct qi_desc	*desc;		/* invalidation queue */
+	void		*desc;		/* invalidation queue */
 	int		*desc_status;	/* desc status */
 	int		free_head;	/* first free entry */
 	int		free_tail;	/* last free entry */
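
A note on the qw2 = 0 / qw3 = 0 lines added at every call site
above: once the hardware may fetch all 32 bytes of a descriptor,
an on-stack qi_desc must not carry stack garbage in the upper
qwords. A hedged illustration in plain C, outside the kernel, of
why a designated initializer would achieve the same effect:

    #include <stdint.h>
    #include <stdio.h>

    struct qi_desc {
        uint64_t qw0, qw1, qw2, qw3;
    };

    int main(void)
    {
        /* members not named in a designated initializer are
         * implicitly zeroed, so qw2/qw3 start out clean */
        struct qi_desc desc = { .qw0 = 0x4 };

        printf("qw2 = %llu, qw3 = %llu\n",
               (unsigned long long)desc.qw2,
               (unsigned long long)desc.qw3);
        return 0;
    }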