drm/msm: Drop priv->lastctx
[ Upstream commit 1d054c9b8457b56a651109fac21f56f46ccd46b2 ]

cur_ctx_seqno already does the same thing, but handles the edge cases
where a refcnt'd context can live after lastclose. So let's not have
two ways to do the same thing.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Reviewed-by: Akhil P Oommen <akhilpo@codeaurora.org>
Link: https://lore.kernel.org/r/20211109181117.591148-3-robdclark@gmail.com
Signed-off-by: Rob Clark <robdclark@chromium.org>
Stable-dep-of: a30f9f65b5ac ("drm/msm/a5xx: workaround early ring-buffer emptiness check")
Signed-off-by: Sasha Levin <sashal@kernel.org>
Commit: 77084c27bb (parent: 20d019dcd9)
10 changed files with 22 additions and 35 deletions
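Why track the current context by seqno rather than by pointer? The kernel
doc-comment added in the msm_gpu.h hunk below spells it out: a pointer
comparison can false-match when a context is freed and a new one is
allocated at the same address, while a monotonically increasing seqno never
repeats. The following minimal userspace C sketch illustrates that hazard;
it is not kernel code, and ctx_t, alloc_ctx() and next_seqno are made-up
names for the demo.

/* ctx_seqno_demo.c - illustrative only, not from the kernel tree */
#include <stdio.h>
#include <stdlib.h>

typedef struct { unsigned seqno; } ctx_t;

static unsigned next_seqno = 1;

static ctx_t *alloc_ctx(void)
{
	ctx_t *ctx = malloc(sizeof(*ctx));
	ctx->seqno = next_seqno++;	/* unique for each context lifetime */
	return ctx;
}

int main(void)
{
	ctx_t *a = alloc_ctx();
	ctx_t *last_ptr = a;		/* the old priv->lastctx scheme */
	unsigned last_seqno = a->seqno;	/* the new cur_ctx_seqno scheme */

	free(a);
	ctx_t *b = alloc_ctx();	/* allocator may hand back a's address */

	/* Comparing against a dangling pointer is exactly the bug: if the
	 * address was reused, we wrongly conclude "same context". */
	if (b == last_ptr)
		printf("pointer check: falsely skips the ctx restore\n");
	if (b->seqno != last_seqno)
		printf("seqno check: correctly detects a new context\n");
	return 0;
}

Whether the pointer comparison actually misfires depends on the allocator
reusing the address, which is precisely why it is an unreliable identity
test; the seqno comparison is correct regardless.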
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -12,7 +12,6 @@ static bool a2xx_idle(struct msm_gpu *gpu);
 
 static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
-	struct msm_drm_private *priv = gpu->dev->dev_private;
 	struct msm_ringbuffer *ring = submit->ring;
 	unsigned int i;
 
@@ -23,7 +22,7 @@ static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 			break;
 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
 			/* ignore if there has not been a ctx switch: */
-			if (priv->lastctx == submit->queue->ctx)
+			if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
 				break;
 			fallthrough;
 		case MSM_SUBMIT_CMD_BUF:
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -30,7 +30,6 @@ static bool a3xx_idle(struct msm_gpu *gpu);
 
 static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
-	struct msm_drm_private *priv = gpu->dev->dev_private;
 	struct msm_ringbuffer *ring = submit->ring;
 	unsigned int i;
 
@@ -41,7 +40,7 @@ static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 			break;
 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
 			/* ignore if there has not been a ctx switch: */
-			if (priv->lastctx == submit->queue->ctx)
+			if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
 				break;
 			fallthrough;
 		case MSM_SUBMIT_CMD_BUF:
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -24,7 +24,6 @@ static bool a4xx_idle(struct msm_gpu *gpu);
 
 static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
-	struct msm_drm_private *priv = gpu->dev->dev_private;
 	struct msm_ringbuffer *ring = submit->ring;
 	unsigned int i;
 
@@ -35,7 +34,7 @@ static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 			break;
 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
 			/* ignore if there has not been a ctx switch: */
-			if (priv->lastctx == submit->queue->ctx)
+			if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
 				break;
 			fallthrough;
 		case MSM_SUBMIT_CMD_BUF:
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -56,7 +56,6 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
 
 static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
-	struct msm_drm_private *priv = gpu->dev->dev_private;
 	struct msm_ringbuffer *ring = submit->ring;
 	struct msm_gem_object *obj;
 	uint32_t *ptr, dwords;
@@ -67,7 +66,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
 		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
 			break;
 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-			if (priv->lastctx == submit->queue->ctx)
+			if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
 				break;
 			fallthrough;
 		case MSM_SUBMIT_CMD_BUF:
@@ -117,12 +116,11 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
-	struct msm_drm_private *priv = gpu->dev->dev_private;
 	struct msm_ringbuffer *ring = submit->ring;
 	unsigned int i, ibs = 0;
 
 	if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
-		priv->lastctx = NULL;
+		gpu->cur_ctx_seqno = 0;
 		a5xx_submit_in_rb(gpu, submit);
 		return;
 	}
@@ -161,7 +159,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
 			break;
 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-			if (priv->lastctx == submit->queue->ctx)
+			if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
 				break;
 			fallthrough;
 		case MSM_SUBMIT_CMD_BUF:
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -99,7 +99,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
 	u32 asid;
 	u64 memptr = rbmemptr(ring, ttbr0);
 
-	if (ctx->seqno == a6xx_gpu->cur_ctx_seqno)
+	if (ctx->seqno == a6xx_gpu->base.base.cur_ctx_seqno)
 		return;
 
 	if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
@@ -131,14 +131,11 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
 
 	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
 	OUT_RING(ring, 0x31);
-
-	a6xx_gpu->cur_ctx_seqno = ctx->seqno;
 }
 
 static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
 	unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
-	struct msm_drm_private *priv = gpu->dev->dev_private;
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
 	struct msm_ringbuffer *ring = submit->ring;
@@ -170,7 +167,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
 			break;
 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-			if (priv->lastctx == submit->queue->ctx)
+			if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
 				break;
 			fallthrough;
 		case MSM_SUBMIT_CMD_BUF:
@@ -887,7 +884,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 	/* Always come up on rb 0 */
 	a6xx_gpu->cur_ring = gpu->rb[0];
 
-	a6xx_gpu->cur_ctx_seqno = 0;
+	gpu->cur_ctx_seqno = 0;
 
 	/* Enable the SQE_to start the CP engine */
 	gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -20,16 +20,6 @@ struct a6xx_gpu {
 
 	struct msm_ringbuffer *cur_ring;
 
-	/**
-	 * cur_ctx_seqno:
-	 *
-	 * The ctx->seqno value of the context with current pgtables
-	 * installed. Tracked by seqno rather than pointer value to
-	 * avoid dangling pointers, and cases where a ctx can be freed
-	 * and a new one created with the same address.
-	 */
-	int cur_ctx_seqno;
-
 	struct a6xx_gmu gmu;
 
 	struct drm_gem_object *shadow_bo;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
@@ -626,14 +626,8 @@ static void context_close(struct msm_file_private *ctx)
 
 static void msm_postclose(struct drm_device *dev, struct drm_file *file)
 {
-	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_file_private *ctx = file->driver_priv;
 
-	mutex_lock(&dev->struct_mutex);
-	if (ctx == priv->lastctx)
-		priv->lastctx = NULL;
-	mutex_unlock(&dev->struct_mutex);
-
 	context_close(ctx);
 }
 
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
@@ -165,7 +165,7 @@ struct msm_drm_private {
 
 	/* when we have more than one 'msm_gpu' these need to be an array: */
 	struct msm_gpu *gpu;
-	struct msm_file_private *lastctx;
+
 	/* gpu is only set on open(), but we need this info earlier */
 	bool is_a2xx;
 
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
@@ -795,7 +795,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	}
 
 	gpu->funcs->submit(gpu, submit);
-	priv->lastctx = submit->queue->ctx;
+	gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
 
 	hangcheck_timer_reset(gpu);
 }
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
@@ -94,6 +94,17 @@ struct msm_gpu {
 	struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
 	int nr_rings;
 
+	/**
+	 * cur_ctx_seqno:
+	 *
+	 * The ctx->seqno value of the last context to submit rendering,
+	 * and the one with current pgtables installed (for generations
+	 * that support per-context pgtables). Tracked by seqno rather
+	 * than pointer value to avoid dangling pointers, and cases where
+	 * a ctx can be freed and a new one created with the same address.
+	 */
+	int cur_ctx_seqno;
+
 	/*
 	 * List of GEM active objects on this gpu. Protected by
 	 * msm_drm_private::mm_lock