diff options
| author | Rob Clark <robin.clark@oss.qualcomm.com> | 2026-03-16 11:44:42 -0700 |
|---|---|---|
| committer | Rob Clark <robin.clark@oss.qualcomm.com> | 2026-03-31 13:47:27 -0700 |
| commit | 8a7023b035355ef5bfa096bd323256fa8abbbc6a (patch) | |
| tree | 445783569b2445d4d0fabfeab8a5c8969289fe97 /drivers/gpu | |
| parent | 9d24ec327690d8b27190331386319a5c98ec61e7 (diff) | |
drm/msm/vma: Avoid lock in VM_BIND fence signaling path
Use msm_gem_unpin_active(), similar to what is used in the GEM_SUBMIT
path. This avoids needing to hold the obj lock, and the end result is
the same. (As with GEM_SUBMIT, we know the fence isn't signaled yet.)
Reported-by: Akhil P Oommen <akhilpo@oss.qualcomm.com>
Fixes: 2e6a8a1fe2b2 ("drm/msm: Add VM_BIND ioctl")
Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
Patchwork: https://patchwork.freedesktop.org/patch/712230/
Message-ID: <20260316184442.673558-1-robin.clark@oss.qualcomm.com>
Diffstat (limited to 'drivers/gpu')
 drivers/gpu/drm/msm/msm_gem.c     | 3 +++
 drivers/gpu/drm/msm/msm_gem_vma.c | 9 ++++++---
 2 files changed, 9 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index da74f1413f94..74636646d80e 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -507,8 +507,11 @@ void msm_gem_unpin_locked(struct drm_gem_object *obj)
  */
 void msm_gem_unpin_active(struct drm_gem_object *obj)
 {
+	struct msm_drm_private *priv = obj->dev->dev_private;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
+	GEM_WARN_ON(!mutex_is_locked(&priv->lru.lock));
+
 	msm_obj->pin_count--;
 	GEM_WARN_ON(msm_obj->pin_count < 0);
 	update_lru_active(obj);
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 3a6c435d5105..1c6b0920c0d8 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -696,6 +696,7 @@ static struct dma_fence *
 msm_vma_job_run(struct drm_sched_job *_job)
 {
 	struct msm_vm_bind_job *job = to_msm_vm_bind_job(_job);
+	struct msm_drm_private *priv = job->vm->drm->dev_private;
 	struct msm_gem_vm *vm = to_msm_vm(job->vm);
 	struct drm_gem_object *obj;
 	int ret = vm->unusable ? -EINVAL : 0;
@@ -738,12 +739,14 @@ msm_vma_job_run(struct drm_sched_job *_job)
 	if (ret)
 		msm_gem_vm_unusable(job->vm);
 
+	mutex_lock(&priv->lru.lock);
+
 	job_foreach_bo (obj, job) {
-		msm_gem_lock(obj);
-		msm_gem_unpin_locked(obj);
-		msm_gem_unlock(obj);
+		msm_gem_unpin_active(obj);
 	}
 
+	mutex_unlock(&priv->lru.lock);
+
 	/* VM_BIND ops are synchronous, so no fence to wait on: */
 	return NULL;
 }
