Skip to content

Commit 2225cfe

Browse files
committed
drm/gem: Use kref_get_unless_zero for the weak mmap references
Compared to wrapping the final kref_put with dev->struct_mutex this allows us to only acquire the offset manager lock both in the final cleanup and in the lookup. Which has the upside that no locks leak out of the core abstractions. But it means that we need to hold a temporary reference to the object while checking mmap constraints, to make sure the object doesn't disappear. Extending the critical region would have worked too, but would result in more leaky locking. Also, this is the final bit which required dev->struct_mutex in gem core, now modern drivers can be completely struct_mutex free! This needs a new drm_vma_offset_exact_lookup_locked and makes both drm_vma_offset_exact_lookup and drm_vma_offset_lookup unused. v2: Don't leak object references in failure paths (David). v3: Add a comment from Chris explaining how the ordering works, with the slight adjustment that I dropped any mention of struct_mutex since with this patch it's now immaterial to core gem. Cc: David Herrmann <dh.herrmann@gmail.com> Reviewed-by: David Herrmann <dh.herrmann@gmail.com> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> Link: http://mid.gmane.org/1444901623-18918-1-git-send-email-daniel.vetter@ffwll.ch Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
1 parent 3d57b42 commit 2225cfe

File tree

3 files changed

+46
-56
lines changed

3 files changed

+46
-56
lines changed

drivers/gpu/drm/drm_gem.c

Lines changed: 28 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -862,30 +862,46 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
862862
{
863863
struct drm_file *priv = filp->private_data;
864864
struct drm_device *dev = priv->minor->dev;
865-
struct drm_gem_object *obj;
865+
struct drm_gem_object *obj = NULL;
866866
struct drm_vma_offset_node *node;
867867
int ret;
868868

869869
if (drm_device_is_unplugged(dev))
870870
return -ENODEV;
871871

872-
mutex_lock(&dev->struct_mutex);
872+
drm_vma_offset_lock_lookup(dev->vma_offset_manager);
873+
node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
874+
vma->vm_pgoff,
875+
vma_pages(vma));
876+
if (likely(node)) {
877+
obj = container_of(node, struct drm_gem_object, vma_node);
878+
/*
879+
* When the object is being freed, after it hits 0-refcnt it
880+
* proceeds to tear down the object. In the process it will
881+
* attempt to remove the VMA offset and so acquire this
882+
* mgr->vm_lock. Therefore if we find an object with a 0-refcnt
883+
* that matches our range, we know it is in the process of being
884+
* destroyed and will be freed as soon as we release the lock -
885+
* so we have to check for the 0-refcnted object and treat it as
886+
* invalid.
887+
*/
888+
if (!kref_get_unless_zero(&obj->refcount))
889+
obj = NULL;
890+
}
891+
drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
873892

874-
node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
875-
vma->vm_pgoff,
876-
vma_pages(vma));
877-
if (!node) {
878-
mutex_unlock(&dev->struct_mutex);
893+
if (!obj)
879894
return -EINVAL;
880-
} else if (!drm_vma_node_is_allowed(node, filp)) {
881-
mutex_unlock(&dev->struct_mutex);
895+
896+
if (!drm_vma_node_is_allowed(node, filp)) {
897+
drm_gem_object_unreference_unlocked(obj);
882898
return -EACCES;
883899
}
884900

885-
obj = container_of(node, struct drm_gem_object, vma_node);
886-
ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);
901+
ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
902+
vma);
887903

888-
mutex_unlock(&dev->struct_mutex);
904+
drm_gem_object_unreference_unlocked(obj);
889905

890906
return ret;
891907
}

drivers/gpu/drm/drm_vma_manager.c

Lines changed: 12 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -112,7 +112,7 @@ void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
112112
EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
113113

114114
/**
115-
* drm_vma_offset_lookup() - Find node in offset space
115+
* drm_vma_offset_lookup_locked() - Find node in offset space
116116
* @mgr: Manager object
117117
* @start: Start address for object (page-based)
118118
* @pages: Size of object (page-based)
@@ -122,37 +122,21 @@ EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
122122
* region and the given node will be returned, as long as the node spans the
123123
* whole requested area (given the size in number of pages as @pages).
124124
*
125-
* RETURNS:
126-
* Returns NULL if no suitable node can be found. Otherwise, the best match
127-
* is returned. It's the caller's responsibility to make sure the node doesn't
128-
* get destroyed before the caller can access it.
129-
*/
130-
struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
131-
unsigned long start,
132-
unsigned long pages)
133-
{
134-
struct drm_vma_offset_node *node;
135-
136-
read_lock(&mgr->vm_lock);
137-
node = drm_vma_offset_lookup_locked(mgr, start, pages);
138-
read_unlock(&mgr->vm_lock);
139-
140-
return node;
141-
}
142-
EXPORT_SYMBOL(drm_vma_offset_lookup);
143-
144-
/**
145-
* drm_vma_offset_lookup_locked() - Find node in offset space
146-
* @mgr: Manager object
147-
* @start: Start address for object (page-based)
148-
* @pages: Size of object (page-based)
125+
* Note that before lookup the vma offset manager lookup lock must be acquired
126+
* with drm_vma_offset_lock_lookup(). See there for an example. This can then be
127+
* used to implement weakly referenced lookups using kref_get_unless_zero().
149128
*
150-
* Same as drm_vma_offset_lookup() but requires the caller to lock offset lookup
151-
* manually. See drm_vma_offset_lock_lookup() for an example.
129+
* Example:
130+
* drm_vma_offset_lock_lookup(mgr);
131+
* node = drm_vma_offset_lookup_locked(mgr);
132+
* if (node)
133+
* kref_get_unless_zero(container_of(node, sth, entr));
134+
* drm_vma_offset_unlock_lookup(mgr);
152135
*
153136
* RETURNS:
154137
* Returns NULL if no suitable node can be found. Otherwise, the best match
155-
* is returned.
138+
* is returned. It's the caller's responsibility to make sure the node doesn't
139+
* get destroyed before the caller can access it.
156140
*/
157141
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
158142
unsigned long start,

include/drm/drm_vma_manager.h

Lines changed: 6 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -54,9 +54,6 @@ void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
5454
unsigned long page_offset, unsigned long size);
5555
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr);
5656

57-
struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
58-
unsigned long start,
59-
unsigned long pages);
6057
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
6158
unsigned long start,
6259
unsigned long pages);
@@ -71,25 +68,25 @@ bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
7168
struct file *filp);
7269

7370
/**
74-
* drm_vma_offset_exact_lookup() - Look up node by exact address
71+
* drm_vma_offset_exact_lookup_locked() - Look up node by exact address
7572
* @mgr: Manager object
7673
* @start: Start address (page-based, not byte-based)
7774
* @pages: Size of object (page-based)
7875
*
79-
* Same as drm_vma_offset_lookup() but does not allow any offset into the node.
76+
* Same as drm_vma_offset_lookup_locked() but does not allow any offset into the node.
8077
* It only returns the exact object with the given start address.
8178
*
8279
* RETURNS:
8380
* Node at exact start address @start.
8481
*/
8582
static inline struct drm_vma_offset_node *
86-
drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr,
87-
unsigned long start,
88-
unsigned long pages)
83+
drm_vma_offset_exact_lookup_locked(struct drm_vma_offset_manager *mgr,
84+
unsigned long start,
85+
unsigned long pages)
8986
{
9087
struct drm_vma_offset_node *node;
9188

92-
node = drm_vma_offset_lookup(mgr, start, pages);
89+
node = drm_vma_offset_lookup_locked(mgr, start, pages);
9390
return (node && node->vm_node.start == start) ? node : NULL;
9491
}
9592

@@ -108,13 +105,6 @@ drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr,
108105
* not call any other VMA helpers while holding this lock.
109106
*
110107
* Note: You're in atomic-context while holding this lock!
111-
*
112-
* Example:
113-
* drm_vma_offset_lock_lookup(mgr);
114-
* node = drm_vma_offset_lookup_locked(mgr);
115-
* if (node)
116-
* kref_get_unless_zero(container_of(node, sth, entr));
117-
* drm_vma_offset_unlock_lookup(mgr);
118108
*/
119109
static inline void drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr)
120110
{

0 commit comments

Comments
 (0)