From 29bffcd3e48b270b6cd00955c5dcba18cbe2813e Mon Sep 17 00:00:00 2001 From: =?utf8?q?Fran=C3=A7ois=20Tigeot?= Date: Thu, 27 Sep 2018 07:07:37 +0200 Subject: [PATCH] Revert "drm/ttm: convert to the reservation api" This reverts commit c05dd8dc1634d5a751604913c322ee97f9510e46. There is something very wrong with ww mutexes. --- sys/dev/drm/include/drm/ttm/ttm_bo_api.h | 16 ++- sys/dev/drm/include/drm/ttm/ttm_bo_driver.h | 7 +- sys/dev/drm/include/drm/ttm/ttm_execbuf_util.h | 3 +- sys/dev/drm/ttm/ttm_bo.c | 184 ++++++++++++++++++------- sys/dev/drm/ttm/ttm_bo_util.c | 14 +- sys/dev/drm/ttm/ttm_execbuf_util.c | 50 ++++--- 6 files changed, 185 insertions(+), 89 deletions(-) diff --git a/sys/dev/drm/include/drm/ttm/ttm_bo_api.h b/sys/dev/drm/include/drm/ttm/ttm_bo_api.h index bd4490d329..40accbd16f 100644 --- a/sys/dev/drm/include/drm/ttm/ttm_bo_api.h +++ b/sys/dev/drm/include/drm/ttm/ttm_bo_api.h @@ -220,6 +220,8 @@ struct ttm_buffer_object { struct kref kref; struct kref list_kref; + wait_queue_head_t event_queue; + /** * Members protected by the bo::reserved lock. */ @@ -243,6 +245,15 @@ struct ttm_buffer_object { struct list_head ddestroy; struct list_head swap; struct list_head io_reserve_lru; + unsigned long val_seq; + bool seq_valid; + + /** + * Members protected by the bdev::lru_lock + * only when written to. + */ + + atomic_t reserved; /** * Members protected by struct buffer_object_device::fence_lock @@ -272,9 +283,6 @@ struct ttm_buffer_object { uint32_t cur_placement; struct sg_table *sg; - - struct reservation_object *resv; - struct reservation_object ttm_resv; }; /** @@ -728,7 +736,7 @@ extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev); */ static inline bool ttm_bo_is_reserved(struct ttm_buffer_object *bo) { - return ww_mutex_is_locked(&bo->resv->lock); + return atomic_read(&bo->reserved); } #endif diff --git a/sys/dev/drm/include/drm/ttm/ttm_bo_driver.h b/sys/dev/drm/include/drm/ttm/ttm_bo_driver.h index bf1a5e2b1e..36f3846c48 100644 --- a/sys/dev/drm/include/drm/ttm/ttm_bo_driver.h +++ b/sys/dev/drm/include/drm/ttm/ttm_bo_driver.h @@ -34,17 +34,12 @@ #include #include #include -#include #include #include -#include +#include #include -#include -#include #include -#include - /* XXX nasty hack, but does the job */ #undef RB_ROOT #define RB_ROOT(head) (head)->rbh_root diff --git a/sys/dev/drm/include/drm/ttm/ttm_execbuf_util.h b/sys/dev/drm/include/drm/ttm/ttm_execbuf_util.h index a7d1937151..d84087af89 100644 --- a/sys/dev/drm/include/drm/ttm/ttm_execbuf_util.h +++ b/sys/dev/drm/include/drm/ttm/ttm_execbuf_util.h @@ -27,12 +27,13 @@ /* * Authors: Thomas Hellstrom */ +/* $FreeBSD: head/sys/dev/drm2/ttm/ttm_execbuf_util.h 247835 2013-03-05 09:49:34Z kib $ */ #ifndef _TTM_EXECBUF_UTIL_H_ #define _TTM_EXECBUF_UTIL_H_ #include -#include +#include /** * struct ttm_validate_buffer diff --git a/sys/dev/drm/ttm/ttm_bo.c b/sys/dev/drm/ttm/ttm_bo.c index 68a247d4a3..2f825bbbc2 100644 --- a/sys/dev/drm/ttm/ttm_bo.c +++ b/sys/dev/drm/ttm/ttm_bo.c @@ -150,9 +150,6 @@ static void ttm_bo_release_list(struct kref *list_kref) if (bo->ttm) ttm_tt_destroy(bo->ttm); atomic_dec(&bo->glob->bo_count); - if (bo->resv == &bo->ttm_resv) - reservation_object_fini(&bo->ttm_resv); - if (bo->destroy) bo->destroy(bo); else { @@ -161,6 +158,18 @@ static void ttm_bo_release_list(struct kref *list_kref) ttm_mem_global_free(bdev->glob->mem_glob, acc_size); } +static int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, + bool interruptible) +{ + if (interruptible) { + return 
wait_event_interruptible(bo->event_queue, + !ttm_bo_is_reserved(bo)); + } else { + wait_event(bo->event_queue, !ttm_bo_is_reserved(bo)); + return 0; + } +} + void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; @@ -209,27 +218,65 @@ int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo, bool no_wait, bool use_ticket, struct ww_acquire_ctx *ticket) { - int ret = 0; + int ret; - if (no_wait) { - bool success; + while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) { + /** + * Deadlock avoidance for multi-bo reserving. + */ + if (use_ticket && bo->seq_valid) { + /** + * We've already reserved this one. + */ + if (unlikely(ticket->stamp == bo->val_seq)) + return -EDEADLK; + /** + * Already reserved by a thread that will not back + * off for us. We need to back off. + */ + if (unlikely(ticket->stamp - bo->val_seq <= LONG_MAX)) + return -EAGAIN; + } - /* not valid any more, fix your locking! */ - if (WARN_ON(ticket)) + if (no_wait) return -EBUSY; - success = ww_mutex_trylock(&bo->resv->lock); - return success ? 0 : -EBUSY; + ret = ttm_bo_wait_unreserved(bo, interruptible); + + if (unlikely(ret)) + return ret; } - if (interruptible) - ret = ww_mutex_lock_interruptible(&bo->resv->lock, - ticket); - else - ret = ww_mutex_lock(&bo->resv->lock, ticket); - if (ret == -EINTR) - return -ERESTARTSYS; - return ret; + if (use_ticket) { + bool wake_up = false; + + /** + * Wake up waiters that may need to recheck for deadlock, + * if we decreased the sequence number. + */ + if (unlikely((bo->val_seq - ticket->stamp <= LONG_MAX) + || !bo->seq_valid)) + wake_up = true; + + /* + * In the worst case with memory ordering these values can be + * seen in the wrong order. However since we call wake_up_all + * in that case, this will hopefully not pose a problem, + * and the worst case would only cause someone to accidentally + * hit -EAGAIN in ttm_bo_reserve when they see old value of + * val_seq. However this would only happen if seq_valid was + * written before val_seq was, and just means some slightly + * increased cpu usage + */ + bo->val_seq = ticket->stamp; + bo->seq_valid = true; + if (wake_up) + wake_up_all(&bo->event_queue); + } else { + bo->seq_valid = false; + } + + return 0; } EXPORT_SYMBOL(ttm_bo_reserve); @@ -266,35 +313,72 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo, return ret; } +int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo, + bool interruptible, + struct ww_acquire_ctx *ticket) +{ + bool wake_up = false; + int ret; + + while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) { + WARN_ON(bo->seq_valid && ticket->stamp == bo->val_seq); + + ret = ttm_bo_wait_unreserved(bo, interruptible); + + if (unlikely(ret)) + return ret; + } + + if (bo->val_seq - ticket->stamp < LONG_MAX || !bo->seq_valid) + wake_up = true; + + /** + * Wake up waiters that may need to recheck for deadlock, + * if we decreased the sequence number. 
+ */ + bo->val_seq = ticket->stamp; + bo->seq_valid = true; + if (wake_up) + wake_up_all(&bo->event_queue); + + return 0; +} + int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, bool interruptible, struct ww_acquire_ctx *ticket) { struct ttm_bo_global *glob = bo->glob; - int put_count = 0; - int ret = 0; + int put_count, ret; - if (interruptible) - ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, - ticket); - else - ww_mutex_lock_slow(&bo->resv->lock, ticket); - - if (likely(ret == 0)) { + ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, ticket); + if (likely(!ret)) { lockmgr(&glob->lru_lock, LK_EXCLUSIVE); put_count = ttm_bo_del_from_lru(bo); lockmgr(&glob->lru_lock, LK_RELEASE); ttm_bo_list_ref_sub(bo, put_count, true); - } else if (ret == -EINTR) - ret = -ERESTARTSYS; - + } return ret; } EXPORT_SYMBOL(ttm_bo_reserve_slowpath); +/* + * Must interlock with event_queue to avoid race against + * wait_event_common() which can cause wait_event_common() + * to become stuck. + */ +static void +ttm_bo_unreserve_core(struct ttm_buffer_object *bo) +{ + lockmgr(&bo->event_queue.lock, LK_EXCLUSIVE); + atomic_set(&bo->reserved, 0); + lockmgr(&bo->event_queue.lock, LK_RELEASE); + wake_up_all(&bo->event_queue); +} + void ttm_bo_unreserve_ticket_locked(struct ttm_buffer_object *bo, struct ww_acquire_ctx *ticket) { ttm_bo_add_to_lru(bo); - ww_mutex_unlock(&bo->resv->lock); + ttm_bo_unreserve_core(bo); } void ttm_bo_unreserve(struct ttm_buffer_object *bo) @@ -486,8 +570,16 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) bo->ttm = NULL; } ttm_bo_mem_put(bo, &bo->mem); + ttm_bo_unreserve_core(bo); - ww_mutex_unlock (&bo->resv->lock); + /* + * Since the final reference to this bo may not be dropped by + * the current task we have to put a memory barrier here to make + * sure the changes done in this function are always visible. + * + * This function only needs protection against the final kref_put. 
+ */ + cpu_mfence(); } static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) @@ -531,8 +623,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) ttm_bo_add_to_lru(bo); } - if (!ret) - ww_mutex_unlock(&bo->resv->lock); + ttm_bo_unreserve_core(bo); } kref_get(&bo->list_kref); @@ -583,7 +674,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, sync_obj = driver->sync_obj_ref(bo->sync_obj); lockmgr(&bdev->fence_lock, LK_RELEASE); - ww_mutex_unlock(&bo->resv->lock); + ttm_bo_unreserve_core(bo); lockmgr(&glob->lru_lock, LK_RELEASE); ret = driver->sync_obj_wait(sync_obj, false, interruptible); @@ -621,7 +712,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, lockmgr(&bdev->fence_lock, LK_RELEASE); if (ret || unlikely(list_empty(&bo->ddestroy))) { - ww_mutex_unlock(&bo->resv->lock); + ttm_bo_unreserve_core(bo); lockmgr(&glob->lru_lock, LK_RELEASE); return ret; } @@ -1202,7 +1293,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev, int ret = 0; unsigned long num_pages; struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; - bool locked; ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false); if (ret) { @@ -1229,6 +1319,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev, kref_init(&bo->kref); kref_init(&bo->list_kref); atomic_set(&bo->cpu_writers, 0); + atomic_set(&bo->reserved, 1); + init_waitqueue_head(&bo->event_queue); INIT_LIST_HEAD(&bo->lru); INIT_LIST_HEAD(&bo->ddestroy); INIT_LIST_HEAD(&bo->swap); @@ -1247,11 +1339,10 @@ int ttm_bo_init(struct ttm_bo_device *bdev, bo->mem.bus.io_reserved_count = 0; bo->priv_flags = 0; bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); + bo->seq_valid = false; bo->persistent_swap_storage = persistent_swap_storage; bo->acc_size = acc_size; bo->sg = sg; - bo->resv = &bo->ttm_resv; - reservation_object_init(bo->resv); atomic_inc(&bo->glob->bo_count); /* @@ -1270,17 +1361,16 @@ int ttm_bo_init(struct ttm_bo_device *bdev, goto out_err; } - locked = ww_mutex_trylock(&bo->resv->lock); - WARN_ON(!locked); + ret = ttm_bo_validate(bo, placement, interruptible, false); + if (ret) + goto out_err; - if (likely(!ret)) - ret = ttm_bo_validate(bo, placement, interruptible, false); + ttm_bo_unreserve(bo); + return 0; out_err: ttm_bo_unreserve(bo); - - if (unlikely(ret)) - ttm_bo_unref(&bo); + ttm_bo_unref(&bo); return ret; } @@ -1869,7 +1959,7 @@ out: * already swapped buffer. */ - ww_mutex_unlock(&bo->resv->lock); + ttm_bo_unreserve_core(bo); kref_put(&bo->list_kref, ttm_bo_release_list); return ret; } diff --git a/sys/dev/drm/ttm/ttm_bo_util.c b/sys/dev/drm/ttm/ttm_bo_util.c index 32c3cc622b..7b1020736e 100644 --- a/sys/dev/drm/ttm/ttm_bo_util.c +++ b/sys/dev/drm/ttm/ttm_bo_util.c @@ -30,14 +30,10 @@ #include #include +#include +#include #include -#include #include -#include -#include -#include - -#include void ttm_bo_free_old_node(struct ttm_buffer_object *bo) { @@ -450,7 +446,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, struct ttm_buffer_object *fbo; struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_driver *driver = bdev->driver; - int ret; fbo = kmalloc(sizeof(*fbo), M_DRM, M_WAITOK | M_ZERO); if (!fbo) @@ -463,6 +458,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, * TODO: Explicit member copy would probably be better here. 
*/ + init_waitqueue_head(&fbo->event_queue); INIT_LIST_HEAD(&fbo->ddestroy); INIT_LIST_HEAD(&fbo->lru); INIT_LIST_HEAD(&fbo->swap); @@ -480,10 +476,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, kref_init(&fbo->kref); fbo->destroy = &ttm_transfered_destroy; fbo->acc_size = 0; - bo->resv = &fbo->ttm_resv; - reservation_object_init(fbo->resv); - ret = ww_mutex_trylock(&fbo->resv->lock); - WARN_ON(!ret); /* * Mirror ref from kref_init() for list_kref. diff --git a/sys/dev/drm/ttm/ttm_execbuf_util.c b/sys/dev/drm/ttm/ttm_execbuf_util.c index b5cb11f05c..f357c6f5a5 100644 --- a/sys/dev/drm/ttm/ttm_execbuf_util.c +++ b/sys/dev/drm/ttm/ttm_execbuf_util.c @@ -28,9 +28,8 @@ #include #include #include +#include #include -#include -#include static void ttm_eu_backoff_reservation_locked(struct list_head *list, struct ww_acquire_ctx *ticket) @@ -48,7 +47,8 @@ static void ttm_eu_backoff_reservation_locked(struct list_head *list, entry->removed = false; } else { - ww_mutex_unlock(&bo->resv->lock); + atomic_set(&bo->reserved, 0); + wake_up_all(&bo->event_queue); } } } @@ -133,43 +133,55 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, glob = entry->bo->glob; ww_acquire_init(ticket, &reservation_ww_class); + lockmgr(&glob->lru_lock, LK_EXCLUSIVE); retry: list_for_each_entry(entry, list, head) { struct ttm_buffer_object *bo = entry->bo; + int owned; /* already slowpath reserved? */ if (entry->reserved) continue; - ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket); - + ret = ttm_bo_reserve_nolru(bo, true, true, true, ticket); + switch (ret) { + case 0: + break; + case -EBUSY: + ttm_eu_del_from_lru_locked(list); + owned = lockstatus(&glob->lru_lock, curthread); + if (owned == LK_EXCLUSIVE) + lockmgr(&glob->lru_lock, LK_RELEASE); + ret = ttm_bo_reserve_nolru(bo, true, false, + true, ticket); + if (owned == LK_EXCLUSIVE) + lockmgr(&glob->lru_lock, LK_EXCLUSIVE); + if (!ret) + break; + + if (unlikely(ret != -EAGAIN)) + goto err; - if (ret == -EDEADLK) { - /* uh oh, we lost out, drop every reservation and try - * to only reserve this buffer, then start over if - * this succeeds. - */ - lockmgr(&glob->lru_lock, LK_EXCLUSIVE); + /* fallthrough */ + case -EAGAIN: ttm_eu_backoff_reservation_locked(list, ticket); lockmgr(&glob->lru_lock, LK_RELEASE); ttm_eu_list_ref_sub(list); - ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, - ticket); - if (unlikely(ret != 0)) { - if (ret == -EINTR) - ret = -ERESTARTSYS; + ret = ttm_bo_reserve_slowpath_nolru(bo, true, ticket); + if (unlikely(ret != 0)) goto err_fini; - } + lockmgr(&glob->lru_lock, LK_EXCLUSIVE); entry->reserved = true; if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { ret = -EBUSY; goto err; } goto retry; - } else if (ret) + default: goto err; + } entry->reserved = true; if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { @@ -179,14 +191,12 @@ retry: } ww_acquire_done(ticket); - lockmgr(&glob->lru_lock, LK_EXCLUSIVE); ttm_eu_del_from_lru_locked(list); lockmgr(&glob->lru_lock, LK_RELEASE); ttm_eu_list_ref_sub(list); return 0; err: - lockmgr(&glob->lru_lock, LK_EXCLUSIVE); ttm_eu_backoff_reservation_locked(list, ticket); lockmgr(&glob->lru_lock, LK_RELEASE); ttm_eu_list_ref_sub(list); -- 2.11.4.GIT
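
For reference, the deadlock-avoidance path this revert restores in ttm_bo_reserve_nolru() and ttm_bo_reserve_slowpath_nolru() hinges on a wrap-safe comparison of the acquire context's stamp against the buffer's val_seq: when ticket->stamp - bo->val_seq <= LONG_MAX, the current holder took its stamp earlier (it is "older") and will not back off, so the caller returns -EAGAIN and drops its other reservations; an equal stamp means the caller already reserved the buffer and gets -EDEADLK. The following standalone sketch shows only that arithmetic; check_stamps() and the sample stamp values are hypothetical illustrations, not TTM code.

/*
 * Minimal sketch of the wrap-safe ticket-stamp test used by the
 * restored reservation code.  check_stamps() and the stamps passed
 * in main() are made up for demonstration purposes only.
 */
#include <errno.h>
#include <limits.h>
#include <stdio.h>

static int check_stamps(unsigned long my_stamp, unsigned long holder_stamp)
{
	if (my_stamp == holder_stamp)
		return -EDEADLK;	/* we already reserved this buffer ourselves */
	if (my_stamp - holder_stamp <= LONG_MAX)
		return -EAGAIN;		/* holder is older and won't yield: we back off */
	return 0;			/* holder is younger: it will back off, keep waiting */
}

int main(void)
{
	printf("%d\n", check_stamps(101, 100));		/* -EAGAIN: yield to the older holder */
	printf("%d\n", check_stamps(100, 101));		/* 0: the younger holder backs off instead */
	printf("%d\n", check_stamps(2, ULONG_MAX));	/* -EAGAIN: ordering survives counter wrap-around */
	return 0;
}

Because the stamps and val_seq are unsigned longs, comparing their difference against LONG_MAX keeps the older/younger ordering correct even after the global sequence counter wraps, which is why the code tests the difference rather than comparing the stamps directly.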