Commit 5fd7430a authored by Oleksandr Natalenko

Merge branch 'fixes-5.5' into pf-5.5

parents 5885e7fb 4d70b32b
@@ -174,7 +174,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
GEM_BUG_ON(vma->obj != obj);
spin_unlock(&obj->vma.lock);
i915_vma_destroy(vma);
__i915_vma_put(vma);
spin_lock(&obj->vma.lock);
}
......
@@ -1110,8 +1110,7 @@ static int __igt_write_huge(struct intel_context *ce,
out_vma_unpin:
i915_vma_unpin(vma);
out_vma_close:
i915_vma_destroy(vma);
__i915_vma_put(vma);
return err;
}
......
@@ -161,7 +161,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
kunmap(p);
out:
i915_vma_destroy(vma);
__i915_vma_put(vma);
return err;
}
@@ -255,7 +255,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
if (err)
return err;
i915_vma_destroy(vma);
__i915_vma_put(vma);
if (igt_timeout(end_time,
"%s: timed out after tiling=%d stride=%d\n",
......
@@ -91,10 +91,9 @@ static void debug_active_init(struct i915_active *ref)
static void debug_active_activate(struct i915_active *ref)
{
spin_lock_irq(&ref->tree_lock);
lockdep_assert_held(&ref->tree_lock);
if (!atomic_read(&ref->count)) /* before the first inc */
debug_object_activate(ref, &active_debug_desc);
spin_unlock_irq(&ref->tree_lock);
}
static void debug_active_deactivate(struct i915_active *ref)
@@ -407,8 +406,10 @@ int i915_active_acquire(struct i915_active *ref)
if (!atomic_read(&ref->count) && ref->active)
err = ref->active(ref);
if (!err) {
spin_lock_irq(&ref->tree_lock); /* vs __active_retire() */
debug_active_activate(ref);
atomic_inc(&ref->count);
spin_unlock_irq(&ref->tree_lock);
}
mutex_unlock(&ref->mutex);
......
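The two hunks above hoist the debug-object locking out of debug_active_activate() and into i915_active_acquire(), so that marking the reference active and taking the first count happen in one critical section against __active_retire(). A minimal userspace sketch of that "lock in the caller, assert in the helper" pattern follows; pthreads and C11 atomics stand in for the kernel spinlock and atomic_t, and the toy_* names are invented for illustration, not part of the driver:

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Toy stand-in for struct i915_active: a lock, a count, a debug marker. */
struct toy_active {
	pthread_mutex_t tree_lock;
	atomic_int count;
	int debug_active;
};

/*
 * After the change the helper no longer locks by itself; the caller
 * must already hold tree_lock (lockdep_assert_held() in the kernel,
 * only a comment in this toy).
 */
static void toy_debug_activate(struct toy_active *ref)
{
	if (!atomic_load(&ref->count))	/* before the first increment */
		ref->debug_active = 1;
}

static void toy_active_acquire(struct toy_active *ref)
{
	/* One critical section: debug marker and first count together. */
	pthread_mutex_lock(&ref->tree_lock);
	toy_debug_activate(ref);
	atomic_fetch_add(&ref->count, 1);
	pthread_mutex_unlock(&ref->tree_lock);
}

int main(void)
{
	struct toy_active ref = { .tree_lock = PTHREAD_MUTEX_INITIALIZER };

	toy_active_acquire(&ref);
	printf("count=%d active=%d\n",
	       atomic_load(&ref.count), ref.debug_active);
	return 0;
}
```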
@@ -119,33 +119,65 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
unsigned long flags)
{
struct i915_vma *vma;
struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
LIST_HEAD(still_in_list);
int ret = 0;
intel_wakeref_t wakeref;
struct i915_vma *vma;
int ret;
if (!atomic_read(&obj->bind_count))
return 0;
/*
* As some machines use ACPI to handle runtime-resume callbacks, and
* ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex
* as they are required by the shrinker. Ergo, we wake the device up
* first just in case.
*/
wakeref = intel_runtime_pm_get(rpm);
try_again:
ret = 0;
spin_lock(&obj->vma.lock);
while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
struct i915_vma,
obj_link))) {
struct i915_address_space *vm = vma->vm;
ret = -EBUSY;
list_move_tail(&vma->obj_link, &still_in_list);
if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
continue;
ret = -EAGAIN;
if (!i915_vm_tryopen(vm))
break;
list_move_tail(&vma->obj_link, &still_in_list);
/* Prevent vma being freed by i915_vma_parked as we unbind */
vma = __i915_vma_get(vma);
spin_unlock(&obj->vma.lock);
if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
!i915_vma_is_active(vma))
ret = i915_vma_unbind(vma);
if (vma) {
ret = -EBUSY;
if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
!i915_vma_is_active(vma))
ret = i915_vma_unbind(vma);
__i915_vma_put(vma);
}
i915_vm_close(vm);
spin_lock(&obj->vma.lock);
}
list_splice(&still_in_list, &obj->vma.list);
list_splice_init(&still_in_list, &obj->vma.list);
spin_unlock(&obj->vma.lock);
if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_ACTIVE) {
rcu_barrier(); /* flush the i915_vm_release() */
goto try_again;
}
intel_runtime_pm_put(rpm, wakeref);
return ret;
}
......
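The rewritten i915_gem_object_unbind() above walks obj->vma.list under the spinlock, moves each vma onto a private still_in_list, pins it with __i915_vma_get() so it cannot be freed while the lock is dropped, performs the unbind without the lock, then re-takes the lock and finally splices the list back. A minimal userspace sketch of that walk is below; a pthread mutex and a hand-rolled singly linked list replace the kernel primitives, and all toy_* names are invented for illustration:

```c
#include <pthread.h>
#include <stdio.h>

/* Toy vma: just a list link, a reference count and a bound flag. */
struct toy_vma {
	struct toy_vma *next;
	int refs;	/* protected by toy_obj.lock in this sketch */
	int bound;
};

struct toy_obj {
	pthread_mutex_t lock;
	struct toy_vma *vmas;
};

static void toy_vma_unbind(struct toy_vma *vma)
{
	vma->bound = 0;	/* expensive work, done without obj->lock held */
}

static void toy_obj_unbind_all(struct toy_obj *obj)
{
	struct toy_vma *still_in_list = NULL;
	struct toy_vma *vma;

	pthread_mutex_lock(&obj->lock);
	while ((vma = obj->vmas)) {
		/* Move to a private list so we never visit it twice. */
		obj->vmas = vma->next;
		vma->next = still_in_list;
		still_in_list = vma;

		vma->refs++;			/* __i915_vma_get() analogue */
		pthread_mutex_unlock(&obj->lock);

		toy_vma_unbind(vma);		/* i915_vma_unbind() analogue */

		pthread_mutex_lock(&obj->lock);
		vma->refs--;			/* __i915_vma_put() analogue */
	}
	obj->vmas = still_in_list;		/* list_splice_init() analogue */
	pthread_mutex_unlock(&obj->lock);
}

int main(void)
{
	struct toy_vma a = { .bound = 1 }, b = { .next = &a, .bound = 1 };
	struct toy_obj obj = { .lock = PTHREAD_MUTEX_INITIALIZER, .vmas = &b };

	toy_obj_unbind_all(&obj);
	printf("a.bound=%d b.bound=%d\n", a.bound, b.bound);
	return 0;
}
```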
@@ -522,7 +522,7 @@ void __i915_vm_close(struct i915_address_space *vm)
atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
WARN_ON(__i915_vma_unbind(vma));
i915_vma_destroy(vma);
__i915_vma_put(vma);
i915_gem_object_put(obj);
}
@@ -1790,7 +1790,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
i915_vma_destroy(ppgtt->vma);
__i915_vma_put(ppgtt->vma);
gen6_ppgtt_free_pd(ppgtt);
free_scratch(vm);
@@ -1878,6 +1878,7 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
i915_active_init(&vma->active, NULL, NULL);
kref_init(&vma->ref);
mutex_init(&vma->pages_mutex);
vma->vm = i915_vm_get(&ggtt->vm);
vma->ops = &pd_vma_ops;
......
@@ -112,6 +112,7 @@ vma_create(struct drm_i915_gem_object *obj,
if (vma == NULL)
return ERR_PTR(-ENOMEM);
kref_init(&vma->ref);
mutex_init(&vma->pages_mutex);
vma->vm = i915_vm_get(vm);
vma->ops = &vm->vma_ops;
@@ -978,8 +979,10 @@ void i915_vma_reopen(struct i915_vma *vma)
__i915_vma_remove_closed(vma);
}
void i915_vma_destroy(struct i915_vma *vma)
void i915_vma_release(struct kref *ref)
{
struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
if (drm_mm_node_allocated(&vma->node)) {
mutex_lock(&vma->vm->mutex);
atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
@@ -1027,7 +1030,7 @@ void i915_vma_parked(struct intel_gt *gt)
spin_unlock_irq(&gt->closed_lock);
if (obj) {
i915_vma_destroy(vma);
__i915_vma_put(vma);
i915_gem_object_put(obj);
}
@@ -1192,7 +1195,7 @@ int __i915_vma_unbind(struct i915_vma *vma)
i915_vma_detach(vma);
vma_unbind_pages(vma);
drm_mm_remove_node(&vma->node); /* pairs with i915_vma_destroy() */
drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
return 0;
}
......
@@ -51,14 +51,19 @@ enum i915_cache_level;
*/
struct i915_vma {
struct drm_mm_node node;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
const struct i915_vma_ops *ops;
struct i915_fence_reg *fence;
struct drm_i915_gem_object *obj;
struct dma_resv *resv; /** Alias of obj->resv */
struct sg_table *pages;
void __iomem *iomap;
void *private; /* owned by creator */
struct i915_fence_reg *fence;
u64 size;
u64 display_alignment;
struct i915_page_sizes page_sizes;
@@ -71,6 +76,7 @@ struct i915_vma {
* handles (but same file) for execbuf, i.e. the number of aliases
* that exist in the ctx->handle_vmas LUT for this vma.
*/
struct kref ref;
atomic_t open_count;
atomic_t flags;
/**
@@ -333,7 +339,20 @@ int __must_check i915_vma_unbind(struct i915_vma *vma);
void i915_vma_unlink_ctx(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma);
void i915_vma_reopen(struct i915_vma *vma);
void i915_vma_destroy(struct i915_vma *vma);
static inline struct i915_vma *__i915_vma_get(struct i915_vma *vma)
{
if (kref_get_unless_zero(&vma->ref))
return vma;
return NULL;
}
void i915_vma_release(struct kref *ref);
static inline void __i915_vma_put(struct i915_vma *vma)
{
kref_put(&vma->ref, i915_vma_release);
}
#define assert_vma_held(vma) dma_resv_assert_held((vma)->resv)
......
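Taken together, the header changes above replace i915_vma_destroy() with reference counting: vma_create() and pd_vma_create() do kref_init(), __i915_vma_get() wraps kref_get_unless_zero(), and __i915_vma_put() drops the reference via kref_put(..., i915_vma_release). A minimal userspace sketch of that lifetime scheme follows, assuming C11 atomics in place of struct kref; toy_vma and its helpers are illustrative only:

```c
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy vma with a refcount; bookkeeping beyond the ref is elided. */
struct toy_vma {
	atomic_uint ref;
};

static struct toy_vma *toy_vma_create(void)
{
	struct toy_vma *vma = calloc(1, sizeof(*vma));

	if (vma)
		atomic_store(&vma->ref, 1);	/* kref_init() analogue */
	return vma;
}

/* kref_get_unless_zero() analogue: only succeed while still alive. */
static struct toy_vma *toy_vma_get(struct toy_vma *vma)
{
	unsigned int old = atomic_load(&vma->ref);

	while (old) {
		if (atomic_compare_exchange_weak(&vma->ref, &old, old + 1))
			return vma;
	}
	return NULL;
}

static void toy_vma_release(struct toy_vma *vma)
{
	free(vma);	/* i915_vma_release() analogue */
}

/* kref_put() analogue: release the object when the last reference drops. */
static void toy_vma_put(struct toy_vma *vma)
{
	if (atomic_fetch_sub(&vma->ref, 1) == 1)
		toy_vma_release(vma);
}

int main(void)
{
	struct toy_vma *vma = toy_vma_create();

	if (!vma)
		return 1;

	if (toy_vma_get(vma))		/* extra reference taken... */
		toy_vma_put(vma);	/* ...and dropped again */

	toy_vma_put(vma);		/* last put frees the object */
	printf("done\n");
	return 0;
}
```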