summaryrefslogtreecommitdiff
path: root/include/linux
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2026-04-15 15:05:51 -0700
committerLinus Torvalds <torvalds@linux-foundation.org>2026-04-15 15:05:51 -0700
commitf1d26d72f01556c787b1291729aa7a2ce37656a8 (patch)
tree4224e18dd0191fd60cafbdee781266a2c42e06cb /include/linux
parent5a69195686d5b874ac5a4c7f809ecb75fbc535ef (diff)
parentf8d5e7066d846c92ecac245134baf8a207becb65 (diff)
Merge tag 'iommu-updates-v7.1' of git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux
Pull iommu updates from Joerg Roedel: "Core: - Support for RISC-V IO-page-table format in generic iommupt code ARM-SMMU Updates: - Introduction of an "invalidation array" for SMMUv3, which enables future scalability work and optimisations for devices with a large number of SMMUv3 instances - Update the conditions under which the SMMUv3 driver works around hardware errata for invalidation on MMU-700 implementations - Fix broken command filtering for the host view of NVIDIA's "cmdqv" SMMUv3 extension - MMU-500 device-tree binding additions for Qualcomm Eliza & Hawi SoCs Intel VT-d: - Support for dirty tracking on domains attached to PASID - Removal of unnecessary read*()/write*() wrappers - Improvements to the invalidation paths AMD Vi: - Race-condition fixed in debugfs code - Make log buffer allocation NUMA aware RISC-V: - IO-TLB flushing improvements - Minor fixes" * tag 'iommu-updates-v7.1' of git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux: (48 commits) iommu/vt-d: Restore IOMMU_CAP_CACHE_COHERENCY dt-bindings: arm-smmu: qcom: Add compatible for Hawi SoC iommu/amd: Invalidate IRT cache for DMA aliases iommu/riscv: Remove overflows on the invalidation path iommu/amd: Fix clone_alias() to use the original device's devid iommu/vt-d: Remove the remaining pages along the invalidation path iommu/vt-d: Pass size_order to qi_desc_piotlb() not npages iommu/vt-d: Split piotlb invalidation into range and all iommu/vt-d: Remove dmar_writel() and dmar_writeq() iommu/vt-d: Remove dmar_readl() and dmar_readq() iommufd/selftest: Test dirty tracking on PASID iommu/vt-d: Support dirty tracking on PASID iommu/vt-d: Rename device_set_dirty_tracking() and pass dmar_domain pointer iommu/vt-d: Block PASID attachment to nested domain with dirty tracking iommu/dma: Always allow DMA-FQ when iommupt provides the iommu_domain iommu/riscv: Fix signedness bug iommu/amd: Fix illegal cap/mmio access in IOMMU debugfs iommu/amd: Fix illegal device-id access in IOMMU debugfs 
iommu/tegra241-cmdqv: Update uAPI to clarify HYP_OWN requirement iommu/tegra241-cmdqv: Set supports_cmd op in tegra241_vcmdq_hw_init() ...
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/generic_pt/common.h16
-rw-r--r--include/linux/generic_pt/iommu.h80
-rw-r--r--include/linux/iommu.h3
3 files changed, 88 insertions, 11 deletions
diff --git a/include/linux/generic_pt/common.h b/include/linux/generic_pt/common.h
index 6a9a1acb5aad..fc5d0b5edadc 100644
--- a/include/linux/generic_pt/common.h
+++ b/include/linux/generic_pt/common.h
@@ -175,6 +175,22 @@ enum {
PT_FEAT_VTDSS_FORCE_WRITEABLE,
};
+struct pt_riscv_32 {
+ struct pt_common common;
+};
+
+struct pt_riscv_64 {
+ struct pt_common common;
+};
+
+enum {
+ /*
+ * Support the 64k contiguous page size following the Svnapot extension.
+ */
+ PT_FEAT_RISCV_SVNAPOT_64K = PT_FEAT_FMT_START,
+
+};
+
struct pt_x86_64 {
struct pt_common common;
};
diff --git a/include/linux/generic_pt/iommu.h b/include/linux/generic_pt/iommu.h
index 9eefbb74efd0..dd0edd02a48a 100644
--- a/include/linux/generic_pt/iommu.h
+++ b/include/linux/generic_pt/iommu.h
@@ -66,6 +66,13 @@ struct pt_iommu {
struct device *iommu_device;
};
+static inline struct pt_iommu *iommupt_from_domain(struct iommu_domain *domain)
+{
+ if (!IS_ENABLED(CONFIG_IOMMU_PT) || !domain->is_iommupt)
+ return NULL;
+ return container_of(domain, struct pt_iommu, domain);
+}
+
/**
* struct pt_iommu_info - Details about the IOMMU page table
*
@@ -81,6 +88,56 @@ struct pt_iommu_info {
struct pt_iommu_ops {
/**
+ * @map_range: Install translation for an IOVA range
+ * @iommu_table: Table to manipulate
+ * @iova: IO virtual address to start
+ * @paddr: Physical/Output address to start
+ * @len: Length of the range starting from @iova
+ * @prot: A bitmap of IOMMU_READ/WRITE/CACHE/NOEXEC/MMIO
+ * @gfp: GFP flags for any memory allocations
+ *
+ * The range starting at IOVA will have paddr installed into it. The
+ * range is automatically segmented into optimally sized table entries,
+ * and can have any valid alignment.
+ *
+ * On error the caller will probably want to invoke unmap on the range
+ * from iova up to the amount indicated by @mapped to return the table
+ * back to an unchanged state.
+ *
+ * Context: The caller must hold a write range lock that includes
+ * the whole range.
+ *
+ * Returns: -ERRNO on failure, 0 on success. The number of bytes of VA
+ * that were mapped are added to @mapped, @mapped is not zeroed first.
+ */
+ int (*map_range)(struct pt_iommu *iommu_table, dma_addr_t iova,
+ phys_addr_t paddr, dma_addr_t len, unsigned int prot,
+ gfp_t gfp, size_t *mapped);
+
+ /**
+ * @unmap_range: Make a range of IOVA empty/not present
+ * @iommu_table: Table to manipulate
+ * @iova: IO virtual address to start
+ * @len: Length of the range starting from @iova
+ * @iotlb_gather: Gather struct that must be flushed on return
+ *
+ * unmap_range() will remove a translation created by map_range(). It
+ * cannot subdivide a mapping created by map_range(), so it should be
+ * called with IOVA ranges that match those passed to map_range(). The
+ * IOVA range can aggregate contiguous map_range() calls so long as no
+ * individual range is split.
+ *
+ * Context: The caller must hold a write range lock that includes
+ * the whole range.
+ *
+ * Returns: Number of bytes of VA unmapped. iova + res will be the
+ * point unmapping stopped.
+ */
+ size_t (*unmap_range)(struct pt_iommu *iommu_table, dma_addr_t iova,
+ dma_addr_t len,
+ struct iommu_iotlb_gather *iotlb_gather);
+
+ /**
* @set_dirty: Make the iova write dirty
* @iommu_table: Table to manipulate
* @iova: IO virtual address to start
@@ -194,14 +251,6 @@ struct pt_iommu_cfg {
#define IOMMU_PROTOTYPES(fmt) \
phys_addr_t pt_iommu_##fmt##_iova_to_phys(struct iommu_domain *domain, \
dma_addr_t iova); \
- int pt_iommu_##fmt##_map_pages(struct iommu_domain *domain, \
- unsigned long iova, phys_addr_t paddr, \
- size_t pgsize, size_t pgcount, \
- int prot, gfp_t gfp, size_t *mapped); \
- size_t pt_iommu_##fmt##_unmap_pages( \
- struct iommu_domain *domain, unsigned long iova, \
- size_t pgsize, size_t pgcount, \
- struct iommu_iotlb_gather *iotlb_gather); \
int pt_iommu_##fmt##_read_and_clear_dirty( \
struct iommu_domain *domain, unsigned long iova, size_t size, \
unsigned long flags, struct iommu_dirty_bitmap *dirty); \
@@ -222,9 +271,7 @@ struct pt_iommu_cfg {
* iommu_pt
*/
#define IOMMU_PT_DOMAIN_OPS(fmt) \
- .iova_to_phys = &pt_iommu_##fmt##_iova_to_phys, \
- .map_pages = &pt_iommu_##fmt##_map_pages, \
- .unmap_pages = &pt_iommu_##fmt##_unmap_pages
+ .iova_to_phys = &pt_iommu_##fmt##_iova_to_phys
#define IOMMU_PT_DIRTY_OPS(fmt) \
.read_and_clear_dirty = &pt_iommu_##fmt##_read_and_clear_dirty
@@ -275,6 +322,17 @@ struct pt_iommu_vtdss_hw_info {
IOMMU_FORMAT(vtdss, vtdss_pt);
+struct pt_iommu_riscv_64_cfg {
+ struct pt_iommu_cfg common;
+};
+
+struct pt_iommu_riscv_64_hw_info {
+ u64 ppn;
+ u8 fsc_iosatp_mode;
+};
+
+IOMMU_FORMAT(riscv_64, riscv_64pt);
+
struct pt_iommu_x86_64_cfg {
struct pt_iommu_cfg common;
/* 4 is a 57 bit 5 level table */
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 555597b54083..e587d4ac4d33 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -223,6 +223,7 @@ enum iommu_domain_cookie_type {
struct iommu_domain {
unsigned type;
enum iommu_domain_cookie_type cookie_type;
+ bool is_iommupt;
const struct iommu_domain_ops *ops;
const struct iommu_dirty_ops *dirty_ops;
const struct iommu_ops *owner; /* Whose domain_alloc we came from */
@@ -271,6 +272,8 @@ enum iommu_cap {
*/
IOMMU_CAP_DEFERRED_FLUSH,
IOMMU_CAP_DIRTY_TRACKING, /* IOMMU supports dirty tracking */
+ /* ATS is supported and may be enabled for this device */
+ IOMMU_CAP_PCI_ATS_SUPPORTED,
};
/* These are the possible reserved region types */