diff options
| author | Tejas Upadhyay <tejas.upadhyay@intel.com> | 2026-03-05 17:49:04 +0530 |
|---|---|---|
| committer | Tejas Upadhyay <tejas.upadhyay@intel.com> | 2026-03-23 15:23:24 +0530 |
| commit | 4e7ebff69aed345f65f590a17b3119c0cb5eadde (patch) | |
| tree | e4234712515e796818914f90f2fb09da29d66540 | |
| parent | 61e7649a1a253609769063a30018e68b970324d6 (diff) | |
drm/xe/xe3p_lpg: flush shrinker bo cachelines manually
XA, a new pat_index introduced post xe3p_lpg for memory shared between the
CPU and GPU, is treated differently from other GPU memory when the Media
engine is power-gated.
XA is *always* flushed, like at the end-of-submission (and maybe other
places), just that internally as an optimisation hw doesn't need to make
that a full flush (which will also include XA) when Media is
off/powergated, since it doesn't need to worry about GT caches vs Media
coherency, and only CPU vs GPU coherency, so can make that flush a
targeted XA flush, since stuff tagged with XA now means it's shared with
the CPU. The main implication is that we now need to somehow flush non-XA
before freeing system memory pages, otherwise dirty cachelines could be
flushed after the free (like if Media suddenly turns on and does a full
flush)
V4: Add comments for L2 flush path
V3(Thomas/MattA/MattR): Restrict userptr with non-xa, then no need to
flush manually
V2(MattA): Expand commit description
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patch.msgid.link/20260305121902.1892593-7-tejas.upadhyay@intel.com
Signed-off-by: Tejas Upadhyay <tejas.upadhyay@intel.com>
| -rw-r--r-- | drivers/gpu/drm/xe/xe_bo.c | 7 | ||||
| -rw-r--r-- | drivers/gpu/drm/xe/xe_device.c | 23 | ||||
| -rw-r--r-- | drivers/gpu/drm/xe/xe_device.h | 1 |
3 files changed, 30 insertions, 1 deletions
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index 22179b2df85c..216e1d8635f4 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -689,7 +689,12 @@ static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo, if (!xe_vm_in_fault_mode(vm)) { drm_gpuvm_bo_evict(vm_bo, true); - continue; + /* + * L2 cache may not be flushed, so ensure that is done in + * xe_vm_invalidate_vma() below + */ + if (!xe_device_is_l2_flush_optimized(xe)) + continue; } if (!idle) { diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index e77a3a3db73d..daf2c815082c 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -1094,6 +1094,29 @@ static void tdf_request_sync(struct xe_device *xe) } } +/** + * xe_device_is_l2_flush_optimized - if L2 flush is optimized by HW + * @xe: The device to check. + * + * Return: true if the HW device optimizing L2 flush, false otherwise. + */ +bool xe_device_is_l2_flush_optimized(struct xe_device *xe) +{ + /* XA is *always* flushed, like at the end-of-submssion (and maybe other + * places), just that internally as an optimisation hw doesn't need to make + * that a full flush (which will also include XA) when Media is + * off/powergated, since it doesn't need to worry about GT caches vs Media + * coherency, and only CPU vs GPU coherency, so can make that flush a + * targeted XA flush, since stuff tagged with XA now means it's shared with + * the CPU. 
The main implication is that we now need to somehow flush non-XA before + * freeing system memory pages, otherwise dirty cachelines could be flushed after the free + * (like if Media suddenly turns on and does a full flush) + */ + if (GRAPHICS_VER(xe) >= 35 && !IS_DGFX(xe)) + return true; + return false; +} + void xe_device_l2_flush(struct xe_device *xe) { struct xe_gt *gt; diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h index c4d267002661..e4b9de8d8e95 100644 --- a/drivers/gpu/drm/xe/xe_device.h +++ b/drivers/gpu/drm/xe/xe_device.h @@ -188,6 +188,7 @@ void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p); u64 xe_device_canonicalize_addr(struct xe_device *xe, u64 address); u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address); +bool xe_device_is_l2_flush_optimized(struct xe_device *xe); void xe_device_td_flush(struct xe_device *xe); void xe_device_l2_flush(struct xe_device *xe); |
