Revert "drm/msm: Add priv->mm_lock to protect active/inactive lists"

This reverts commit 20d019dcd9.
Ksawlii 2024-11-24 00:23:25 +01:00
parent c18d247bc2
commit a33260418a
6 changed files with 14 additions and 58 deletions
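For orientation: after this revert the driver returns to the older single-lock scheme, in which dev->struct_mutex guards both priv->inactive_list and gpu->active_list. A minimal sketch of that invariant, using a hypothetical helper name (kernel context assumed; not part of the diff below):

	/* Hypothetical helper: after the revert, every mm_list move must
	 * happen with dev->struct_mutex already held, as asserted here.
	 */
	static void move_to_inactive_locked(struct drm_device *dev,
					    struct msm_gem_object *msm_obj)
	{
		struct msm_drm_private *priv = dev->dev_private;

		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		msm_obj->gpu = NULL;
		list_del_init(&msm_obj->mm_list);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	}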

drivers/gpu/drm/msm/msm_debugfs.c

@@ -113,11 +113,6 @@ static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gpu *gpu = priv->gpu;
-	int ret;
-
-	ret = mutex_lock_interruptible(&priv->mm_lock);
-	if (ret)
-		return ret;
 
 	if (gpu) {
 		seq_printf(m, "Active Objects (%s):\n", gpu->name);
@@ -127,8 +122,6 @@ static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
 	seq_printf(m, "Inactive Objects:\n");
 	msm_gem_describe_objects(&priv->inactive_list, m);
 
-	mutex_unlock(&priv->mm_lock);
-
 	return 0;
 }

drivers/gpu/drm/msm/msm_drv.c

@@ -7,7 +7,6 @@
 
 #include <linux/dma-mapping.h>
 #include <linux/kthread.h>
-#include <linux/sched/mm.h>
 #include <linux/uaccess.h>
 #include <uapi/linux/sched/types.h>
@@ -443,12 +442,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	init_llist_head(&priv->free_list);
 
 	INIT_LIST_HEAD(&priv->inactive_list);
-	mutex_init(&priv->mm_lock);
-
-	/* Teach lockdep about lock ordering wrt. shrinker: */
-	fs_reclaim_acquire(GFP_KERNEL);
-	might_lock(&priv->mm_lock);
-	fs_reclaim_release(GFP_KERNEL);
 
 	drm_mode_config_init(ddev);
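The lines removed above are a lockdep priming idiom: at init time the driver enters the fs_reclaim pseudo-lock context and declares that the lock may be taken there, so an inverted lock -> allocation -> shrinker chain is reported deterministically at boot instead of only under real memory pressure. A generic sketch of the idiom with a hypothetical demo_lock (kernel context assumed):

	#include <linux/gfp.h>
	#include <linux/lockdep.h>
	#include <linux/mutex.h>
	#include <linux/sched/mm.h>

	static DEFINE_MUTEX(demo_lock);	/* hypothetical lock, illustration only */

	static void teach_lockdep_ordering(void)
	{
		/* Pretend to be in direct reclaim, then record that
		 * demo_lock might be acquired there; lockdep learns the
		 * reclaim -> demo_lock dependency once, up front.
		 */
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&demo_lock);
		fs_reclaim_release(GFP_KERNEL);
	}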

drivers/gpu/drm/msm/msm_drv.h

@@ -175,19 +175,8 @@ struct msm_drm_private {
 	struct msm_rd_state *hangrd;   /* debugfs to dump hanging submits */
 	struct msm_perf_state *perf;
 
-	/*
-	 * List of inactive GEM objects. Every bo is either in the inactive_list
-	 * or gpu->active_list (for the gpu it is active on[1])
-	 *
-	 * These lists are protected by mm_lock. If struct_mutex is involved, it
-	 * should be aquired prior to mm_lock. One should *not* hold mm_lock in
-	 * get_pages()/vmap()/etc paths, as they can trigger the shrinker.
-	 *
-	 * [1] if someone ever added support for the old 2d cores, there could be
-	 * more than one gpu object
-	 */
+	/* list of GEM objects: */
 	struct list_head inactive_list;
-	struct mutex mm_lock;
 
 	/* worker for delayed free of objects: */
 	struct work_struct free_work;
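The comment block removed above encoded two rules: when struct_mutex is involved it is acquired before mm_lock, and mm_lock is never held across get_pages()/vmap() because those paths can allocate and thereby re-enter the shrinker, which takes mm_lock itself. A sketch of both rules with a hypothetical helper (kernel context assumed):

	/* Hypothetical illustration of the removed locking rules. */
	static void move_bo_then_map(struct drm_device *dev,
				     struct msm_gem_object *msm_obj)
	{
		struct msm_drm_private *priv = dev->dev_private;

		mutex_lock(&dev->struct_mutex);	/* rule 1: outer lock first */
		mutex_lock(&priv->mm_lock);	/* ...then mm_lock */
		list_del_init(&msm_obj->mm_list);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&priv->mm_lock);	/* rule 2: drop mm_lock before
						 * any path that may allocate */
		mutex_unlock(&dev->struct_mutex);
	}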

drivers/gpu/drm/msm/msm_gem.c

@@ -745,17 +745,13 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	struct msm_drm_private *priv = obj->dev->dev_private;
-
-	might_sleep();
+	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
 	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
 
 	if (!atomic_fetch_inc(&msm_obj->active_count)) {
-		mutex_lock(&priv->mm_lock);
 		msm_obj->gpu = gpu;
 		list_del_init(&msm_obj->mm_list);
 		list_add_tail(&msm_obj->mm_list, &gpu->active_list);
-		mutex_unlock(&priv->mm_lock);
 	}
 }
@@ -764,14 +760,12 @@ void msm_gem_active_put(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_drm_private *priv = obj->dev->dev_private;
 
-	might_sleep();
+	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
 
 	if (!atomic_dec_return(&msm_obj->active_count)) {
-		mutex_lock(&priv->mm_lock);
 		msm_obj->gpu = NULL;
 		list_del_init(&msm_obj->mm_list);
 		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-		mutex_unlock(&priv->mm_lock);
 	}
 }
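Both versions of the two functions above rely on the same transition idiom: atomic_fetch_inc() returns the previous value, so only the 0 to 1 transition moves the object onto gpu->active_list, and atomic_dec_return() returns the new value, so only the 1 to 0 transition moves it back; nested get/put pairs cost one atomic op and no list traffic. Condensed (kernel context assumed):

	/* Condensed restatement of the idiom used in the hunks above. */
	if (!atomic_fetch_inc(&msm_obj->active_count)) {
		/* previous value was 0: first user, move bo to the
		 * active list under the applicable lock
		 */
	}

	/* ... later, when the submit retires ... */

	if (!atomic_dec_return(&msm_obj->active_count)) {
		/* new value is 0: last user done, move bo back to the
		 * inactive list
		 */
	}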
@@ -927,16 +921,13 @@ static void free_object(struct msm_gem_object *msm_obj)
 {
 	struct drm_gem_object *obj = &msm_obj->base;
 	struct drm_device *dev = obj->dev;
-	struct msm_drm_private *priv = dev->dev_private;
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
 	/* object should not be on active list: */
 	WARN_ON(is_active(msm_obj));
 
-	mutex_lock(&priv->mm_lock);
 	list_del(&msm_obj->mm_list);
-	mutex_unlock(&priv->mm_lock);
 
 	mutex_lock(&msm_obj->lock);
@@ -1112,9 +1103,14 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
 	}
 
-	mutex_lock(&priv->mm_lock);
-	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-	mutex_unlock(&priv->mm_lock);
+	if (struct_mutex_locked) {
+		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+	} else {
+		mutex_lock(&dev->struct_mutex);
+		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+		mutex_unlock(&dev->struct_mutex);
+	}
 
 	return obj;
@@ -1178,9 +1174,9 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 
 	mutex_unlock(&msm_obj->lock);
 
-	mutex_lock(&priv->mm_lock);
+	mutex_lock(&dev->struct_mutex);
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-	mutex_unlock(&priv->mm_lock);
+	mutex_unlock(&dev->struct_mutex);
 
 	return obj;
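The restored tail of _msm_gem_new() branches on its struct_mutex_locked argument: callers that already hold struct_mutex only get a WARN_ON assertion, everyone else takes the mutex around the list insertion. The same pattern condensed into a hypothetical helper (kernel context assumed):

	/* Hypothetical condensation of the restored insertion pattern. */
	static void add_to_inactive(struct drm_device *dev,
				    struct msm_gem_object *msm_obj,
				    bool struct_mutex_locked)
	{
		struct msm_drm_private *priv = dev->dev_private;

		if (struct_mutex_locked)
			WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		else
			mutex_lock(&dev->struct_mutex);

		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

		if (!struct_mutex_locked)
			mutex_unlock(&dev->struct_mutex);
	}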

drivers/gpu/drm/msm/msm_gem_shrinker.c

@@ -51,15 +51,11 @@ msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 	if (!msm_gem_shrinker_lock(dev, &unlock))
 		return 0;
 
-	mutex_lock(&priv->mm_lock);
-
 	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
 		if (is_purgeable(msm_obj))
 			count += msm_obj->base.size >> PAGE_SHIFT;
 	}
-
-	mutex_unlock(&priv->mm_lock);
 
 	if (unlock)
 		mutex_unlock(&dev->struct_mutex);
@@ -79,8 +75,6 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 	if (!msm_gem_shrinker_lock(dev, &unlock))
 		return SHRINK_STOP;
 
-	mutex_lock(&priv->mm_lock);
-
 	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
 		if (freed >= sc->nr_to_scan)
 			break;
@@ -90,8 +84,6 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 		}
 	}
 
-	mutex_unlock(&priv->mm_lock);
-
 	if (unlock)
 		mutex_unlock(&dev->struct_mutex);
@@ -114,8 +106,6 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
 	if (!msm_gem_shrinker_lock(dev, &unlock))
 		return NOTIFY_DONE;
 
-	mutex_lock(&priv->mm_lock);
-
 	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
 		if (is_vunmapable(msm_obj)) {
 			msm_gem_vunmap(&msm_obj->base, OBJ_LOCK_SHRINKER);
@@ -128,8 +118,6 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
 		}
 	}
 
-	mutex_unlock(&priv->mm_lock);
-
 	if (unlock)
 		mutex_unlock(&dev->struct_mutex);
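All three entry points above first go through msm_gem_shrinker_lock(), which must not block on struct_mutex: the shrinker can be reached from an allocation made while this thread already holds that mutex, and sleeping on it there would self-deadlock. A simplified model of the guard using a plain trylock (the real helper is more involved and also copes with the lock being held by the current thread; kernel context assumed):

	/* Simplified model of msm_gem_shrinker_lock(); illustration only. */
	static bool shrinker_lock_sketch(struct drm_device *dev, bool *unlock)
	{
		if (!mutex_trylock(&dev->struct_mutex)) {
			*unlock = false;
			return false;	/* contended: skip this shrink pass */
		}
		*unlock = true;		/* caller unlocks when finished */
		return true;
	}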

drivers/gpu/drm/msm/msm_gpu.h

@@ -94,10 +94,7 @@ struct msm_gpu {
 	struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
 	int nr_rings;
 
-	/*
-	 * List of GEM active objects on this gpu. Protected by
-	 * msm_drm_private::mm_lock
-	 */
+	/* list of GEM active objects: */
 	struct list_head active_list;
 
 	/* does gpu need hw_init? */