Re: [PATCH V2 03/11] iommufd: Allow binding to a noiommu device
From: Mostafa Saleh
Date: Sun Mar 22 2026 - 05:54:37 EST
On Thu, Mar 12, 2026 at 08:56:29AM -0700, Jacob Pan wrote:
> From: Jason Gunthorpe <jgg@xxxxxxxxxx>
>
> Allow iommufd to bind devices without an IOMMU (noiommu mode) by creating
> a dummy IOMMU group for such devices and skipping hwpt operations.
>
> This enables noiommu devices to operate through the same iommufd API as IOMMU-
> capable devices.
>
> Signed-off-by: Jason Gunthorpe <jgg@xxxxxxxxxx>
> Signed-off-by: Jacob Pan <jacob.pan@xxxxxxxxxxxxxxxxxxx>
> ---
> drivers/iommu/iommufd/device.c | 113 ++++++++++++++++++++++-----------
> 1 file changed, 76 insertions(+), 37 deletions(-)
>
> diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
> index 54d73016468f..c38d3efa3d6f 100644
> --- a/drivers/iommu/iommufd/device.c
> +++ b/drivers/iommu/iommufd/device.c
> @@ -23,6 +23,11 @@ struct iommufd_attach {
> struct xarray device_array;
> };
>
> +static bool is_vfio_noiommu(struct iommufd_device *idev)
> +{
> + return !device_iommu_mapped(idev->dev) || !idev->dev->iommu;
Does this need to check for CONFIG_VFIO_NOIOMMU and maybe the module
param enable_unsafe_noiommu_mode, similar to the legacy implementation?
> +}
> +
> static void iommufd_group_release(struct kref *kref)
> {
> struct iommufd_group *igroup =
> @@ -205,32 +210,17 @@ void iommufd_device_destroy(struct iommufd_object *obj)
> struct iommufd_device *idev =
> container_of(obj, struct iommufd_device, obj);
>
> - iommu_device_release_dma_owner(idev->dev);
> + if (!is_vfio_noiommu(idev))
> + iommu_device_release_dma_owner(idev->dev);
> iommufd_put_group(idev->igroup);
> if (!iommufd_selftest_is_mock_dev(idev->dev))
> iommufd_ctx_put(idev->ictx);
> }
>
> -/**
> - * iommufd_device_bind - Bind a physical device to an iommu fd
> - * @ictx: iommufd file descriptor
> - * @dev: Pointer to a physical device struct
> - * @id: Output ID number to return to userspace for this device
> - *
> - * A successful bind establishes an ownership over the device and returns
> - * struct iommufd_device pointer, otherwise returns error pointer.
> - *
> - * A driver using this API must set driver_managed_dma and must not touch
> - * the device until this routine succeeds and establishes ownership.
> - *
> - * Binding a PCI device places the entire RID under iommufd control.
> - *
> - * The caller must undo this with iommufd_device_unbind()
> - */
> -struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
> - struct device *dev, u32 *id)
> +static int iommufd_bind_iommu(struct iommufd_device *idev)
> {
> - struct iommufd_device *idev;
> + struct iommufd_ctx *ictx = idev->ictx;
> + struct device *dev = idev->dev;
> struct iommufd_group *igroup;
> int rc;
>
> @@ -239,11 +229,11 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
> * to restore cache coherency.
> */
> if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY))
> - return ERR_PTR(-EINVAL);
> + return -EINVAL;
>
> - igroup = iommufd_get_group(ictx, dev);
> + igroup = iommufd_get_group(idev->ictx, dev);
> if (IS_ERR(igroup))
> - return ERR_CAST(igroup);
> + return PTR_ERR(igroup);
>
> /*
> * For historical compat with VFIO the insecure interrupt path is
> @@ -269,21 +259,66 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
> if (rc)
> goto out_group_put;
>
> + /* igroup refcount moves into iommufd_device */
> + idev->igroup = igroup;
> + return 0;
> +
> +out_group_put:
> + iommufd_put_group(igroup);
> + return rc;
> +}
> +
> +/**
> + * iommufd_device_bind - Bind a physical device to an iommu fd
> + * @ictx: iommufd file descriptor
> + * @dev: Pointer to a physical device struct
> + * @id: Output ID number to return to userspace for this device
> + *
> + * A successful bind establishes an ownership over the device and returns
> + * struct iommufd_device pointer, otherwise returns error pointer.
> + *
> + * A driver using this API must set driver_managed_dma and must not touch
> + * the device until this routine succeeds and establishes ownership.
> + *
> + * Binding a PCI device places the entire RID under iommufd control.
> + *
> + * The caller must undo this with iommufd_device_unbind()
> + */
> +struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
> + struct device *dev, u32 *id)
> +{
> + struct iommufd_device *idev;
> + int rc;
> +
> idev = iommufd_object_alloc(ictx, idev, IOMMUFD_OBJ_DEVICE);
The code below introduces new error cases; do those need to be cleaned up
in that case by calling iommufd_object_abort_and_destroy()?
Thanks,
Mostafa
> - if (IS_ERR(idev)) {
> - rc = PTR_ERR(idev);
> - goto out_release_owner;
> - }
> + if (IS_ERR(idev))
> + return idev;
> idev->ictx = ictx;
> - if (!iommufd_selftest_is_mock_dev(dev))
> - iommufd_ctx_get(ictx);
> idev->dev = dev;
> idev->enforce_cache_coherency =
> device_iommu_capable(dev, IOMMU_CAP_ENFORCE_CACHE_COHERENCY);
> +
> + if (!is_vfio_noiommu(idev)) {
> + rc = iommufd_bind_iommu(idev);
> + if (rc)
> + return ERR_PTR(rc);
> + } else {
> + struct iommufd_group *igroup;
> +
> + /*
> + * Create a dummy igroup, lots of stuff expects ths igroup to be
> + * present, but a NULL igroup->group is OK
> + */
> + igroup = iommufd_alloc_group(ictx, NULL);
> + if (IS_ERR(igroup))
> + return ERR_CAST(igroup);
> + idev->igroup = igroup;
> + }
> +
> + if (!iommufd_selftest_is_mock_dev(dev))
> + iommufd_ctx_get(ictx);
> /* The calling driver is a user until iommufd_device_unbind() */
> refcount_inc(&idev->obj.users);
> - /* igroup refcount moves into iommufd_device */
> - idev->igroup = igroup;
>
> /*
> * If the caller fails after this success it must call
> @@ -295,11 +330,6 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
> *id = idev->obj.id;
> return idev;
>
> -out_release_owner:
> - iommu_device_release_dma_owner(dev);
> -out_group_put:
> - iommufd_put_group(igroup);
> - return ERR_PTR(rc);
> }
> EXPORT_SYMBOL_NS_GPL(iommufd_device_bind, "IOMMUFD");
>
> @@ -513,6 +543,9 @@ static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
> struct iommufd_attach_handle *handle;
> int rc;
>
> + if (is_vfio_noiommu(idev))
> + return 0;
> +
> if (!iommufd_hwpt_compatible_device(hwpt, idev))
> return -EINVAL;
>
> @@ -560,6 +593,9 @@ static void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt,
> {
> struct iommufd_attach_handle *handle;
>
> + if (is_vfio_noiommu(idev))
> + return;
> +
> handle = iommufd_device_get_attach_handle(idev, pasid);
> if (pasid == IOMMU_NO_PASID)
> iommu_detach_group_handle(hwpt->domain, idev->igroup->group);
> @@ -578,6 +614,9 @@ static int iommufd_hwpt_replace_device(struct iommufd_device *idev,
> struct iommufd_attach_handle *handle, *old_handle;
> int rc;
>
> + if (is_vfio_noiommu(idev))
> + return 0;
> +
> if (!iommufd_hwpt_compatible_device(hwpt, idev))
> return -EINVAL;
>
> @@ -653,7 +692,7 @@ int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
> goto err_release_devid;
> }
>
> - if (attach_resv) {
> + if (attach_resv && !is_vfio_noiommu(idev)) {
> rc = iommufd_device_attach_reserved_iova(idev, hwpt_paging);
> if (rc)
> goto err_release_devid;
> --
> 2.34.1
>