--- i915_active.c	version 1.3, 2021/12/19 01:44:57
+++ i915_active.c	version 1.4, 2021/12/19 11:52:07
@@ -19 +19 @@
 __KERNEL_RCSID(0, "$NetBSD$");

 #include "i915_active.h"
 #include "i915_globals.h"
+
+#include <linux/nbsd-namespace.h>

 /*
  * Active refs memory management
  *
@@ -36 +38 @@
 struct active_node {
	struct i915_active *ref;
	struct rb_node node;
	u64 timeline;
+
+	struct intel_engine_cs *engine;
 };

 static inline struct active_node *
@@ -54 +57 @@ static inline bool is_barrier(const stru
 static inline struct llist_node *barrier_to_ll(struct active_node *node)
 {
	GEM_BUG_ON(!is_barrier(&node->base));
-	return (struct llist_node *)&node->base.cb.node;
+	return &node->base.llist;
 }

 static inline struct intel_engine_cs *
 __barrier_to_engine(struct active_node *node)
 {
-	return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
+	return READ_ONCE(node->engine);
 }

 static inline struct intel_engine_cs *
@@ -72 +75 @@ barrier_to_engine(struct active_node *no
 static inline struct active_node *barrier_from_ll(struct llist_node *x)
 {
-	return container_of((struct list_head *)x,
-	    struct active_node, base.cb.node);
+	return container_of(x, struct active_node, base.llist);
 }
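Note on the hunks above: rev 1.3 reused the storage of the fence callback's list_head as the barrier's llist linkage, and hid the engine pointer in that node's prev field, which is why every accessor needed a cast. Rev 1.4 gives struct i915_active_fence a real llist member and struct active_node a real engine member, and the casts disappear. Below is a minimal userspace sketch of the container_of() recovery that barrier_from_ll() performs; the types are hypothetical stand-ins, and the local container_of definition is equivalent to the kernel's for this purpose.

#include <assert.h>
#include <stddef.h>

#define container_of(ptr, type, member)					\
	((type *)((char *)(ptr) - offsetof(type, member)))

struct llist_node { struct llist_node *next; };

struct fence_stub {			/* stands in for i915_active_fence */
	int dummy;
	struct llist_node llist;	/* the member added in rev 1.4 */
};

struct node_stub {			/* stands in for active_node */
	struct fence_stub base;
};

int
main(void)
{
	struct node_stub n;
	struct llist_node *ll = &n.base.llist;

	/* Recover the enclosing node from the embedded llist member. */
	assert(container_of(ll, struct node_stub, base.llist) == &n);
	return 0;
}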
|
|
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)

@@ -129 +131 @@ static inline void debug_active_assert(s
 #endif
|
|
|
+#ifdef __NetBSD__
+
+static int
+compare_nodes(void *cookie, const void *va, const void *vb)
+{
+	const struct active_node *a = va;
+	const struct active_node *b = vb;
+
+	if (a->timeline < b->timeline)
+		return -1;
+	if (a->timeline > b->timeline)
+		return +1;
+	return 0;
+}
+
+static int
+compare_node_key(void *cookie, const void *vn, const void *vk)
+{
+	const struct active_node *a = vn;
+	const uint64_t *k = vk;
+
+	if (a->timeline < *k)
+		return -1;
+	if (a->timeline > *k)
+		return +1;
+	return 0;
+}
+
+static const rb_tree_ops_t active_rb_ops = {
+	.rbto_compare_nodes = compare_nodes,
+	.rbto_compare_key = compare_node_key,
+	.rbto_node_offset = offsetof(struct active_node, node),
+};
+
+#endif
+
 static void
 __active_retire(struct i915_active *ref)
 {
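This added block is what lets the NetBSD side keep a working rbtree where Linux open-codes rb_node descents: rbtree(3) centralises the comparators in an ops table. Below is a minimal userspace sketch of that contract, assuming <sys/rbtree.h> as shipped in NetBSD libc: comparators return negative/zero/positive, rbto_node_offset locates the embedded rb_node_t, rb_tree_insert_node() hands back the pre-existing node on a key collision (the property behind the driver's KASSERT(collision == node)), and rb_tree_find_node_leq(), which reuse_idle_barrier() uses further down, finds the largest key not exceeding the search key. Type and variable names here are illustrative, not the driver's.

#include <sys/rbtree.h>

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct timeline_node {
	rb_node_t node;		/* embedded linkage, cf. active_node::node */
	uint64_t timeline;	/* sort key */
};

static int
cmp_nodes(void *cookie, const void *va, const void *vb)
{
	const struct timeline_node *a = va;
	const struct timeline_node *b = vb;

	if (a->timeline < b->timeline)
		return -1;
	if (a->timeline > b->timeline)
		return +1;
	return 0;
}

static int
cmp_key(void *cookie, const void *vn, const void *vk)
{
	const struct timeline_node *n = vn;
	const uint64_t *k = vk;

	if (n->timeline < *k)
		return -1;
	if (n->timeline > *k)
		return +1;
	return 0;
}

static const rb_tree_ops_t timeline_rb_ops = {
	.rbto_compare_nodes = cmp_nodes,
	.rbto_compare_key = cmp_key,
	.rbto_node_offset = offsetof(struct timeline_node, node),
};

int
main(void)
{
	rb_tree_t tree;
	struct timeline_node a = { .timeline = 10 }, b = { .timeline = 42 };
	uint64_t key;

	rb_tree_init(&tree, &timeline_rb_ops);
	assert(rb_tree_insert_node(&tree, &a) == &a);	/* no collision */
	assert(rb_tree_insert_node(&tree, &b) == &b);

	key = 42;
	assert(rb_tree_find_node(&tree, &key) == &b);	/* exact match */
	key = 41;
	assert(rb_tree_find_node_leq(&tree, &key) == &a); /* largest <= 41 */
	return 0;
}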
@@ -146 +184 @@ __active_retire(struct i915_active *ref)
	debug_active_deactivate(ref);

	root = ref->tree;
+#ifdef __NetBSD__
+	rb_tree_init(&ref->tree.rbr_tree, &active_rb_ops);
+#else
	ref->tree = RB_ROOT;
+#endif
	ref->cache = NULL;

+	DRM_SPIN_WAKEUP_ALL(&ref->tree_wq, &ref->tree_lock);
+
	spin_unlock_irqrestore(&ref->tree_lock, flags);

	/* After the final retire, the entire struct may be freed */
@@ -156 +200 @@ __active_retire(struct i915_active *ref)
	ref->retire(ref);

	/* ... except if you wait on it, you must manage your own references! */
-	wake_up_var(ref);

	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
		GEM_BUG_ON(i915_active_fence_isset(&it->base));
@@ -249 +292 @@ active_instance(struct i915_active *ref,
 #ifdef __NetBSD__
	__USE(parent);
	__USE(p);
-	node = rb_tree_find_node(&vma->active.rbr_tree, &idx);
+	node = rb_tree_find_node(&ref->tree.rbr_tree, &idx);
	if (node) {
		KASSERT(node->timeline == idx);
		goto out;
@@ -279 +322 @@ active_instance(struct i915_active *ref,
	node->timeline = idx;

 #ifdef __NetBSD__
-	struct i915_vma_active *collision __diagused;
-	collision = rb_tree_insert_node(&vma->active.rbr_tree, node);
+	struct active_node *collision __diagused;
+	collision = rb_tree_insert_node(&ref->tree.rbr_tree, node);
	KASSERT(collision == node);
 #else
	rb_link_node(&node->node, parent, p);

	return &node->base;
 }

-#ifdef __NetBSD__
-static int
-compare_active(void *cookie, const void *va, const void *vb)
-{
-	const struct i915_active *a = va;
-	const struct i915_active *b = vb;
-
-	if (a->timeline < b->timeline)
-		return -1;
-	if (a->timeline > b->timeline)
-		return +1;
-	return 0;
-}
-
-static int
-compare_active_key(void *cookie, const void *vn, const void *vk)
-{
-	const struct i915_active *a = vn;
-	const uint64_t *k = vk;
-
-	if (a->timeline < *k)
-		return -1;
-	if (a->timeline > *k)
-		return +1;
-	return 0;
-}
-
-static const rb_tree_ops_t active_rb_ops = {
-	.rbto_compare_nodes = compare_active,
-	.rbto_compare_key = compare_active_key,
-	.rbto_node_offset = offsetof(struct i915_active, node),
-};
-#endif
-
 void __i915_active_init(struct i915_active *ref,
			 int (*active)(struct i915_active *ref),
			 void (*retire)(struct i915_active *ref),
@@ -346 +355 @@ void __i915_active_init(struct i915_acti
	ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;

	spin_lock_init(&ref->tree_lock);
+	DRM_INIT_WAITQUEUE(&ref->tree_wq, "i915act");
 #ifdef __NetBSD__
-	rb_tree_init(&vma->active.rbr_tree, &active_rb_ops);
+	rb_tree_init(&ref->tree.rbr_tree, &active_rb_ops);
 #else
	ref->tree = RB_ROOT;
 #endif
@@ -533 +543 @@ int i915_active_wait(struct i915_active
	if (err)
		return err;

-	if (wait_var_event_interruptible(ref, i915_active_is_idle(ref)))
-		return -EINTR;
+	spin_lock(&ref->tree_lock);
+	DRM_SPIN_WAIT_UNTIL(err, &ref->tree_wq, &ref->tree_lock,
+	    i915_active_is_idle(ref));
+	spin_unlock(&ref->tree_lock);
+	if (err)
+		return err;

	flush_work(&ref->work);
	return 0;
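The hunk above swaps upstream's wait_var_event_interruptible()/wake_up_var() pairing for an explicit waitqueue: __active_retire() now issues DRM_SPIN_WAKEUP_ALL under tree_lock, and the waiter re-tests i915_active_is_idle() with the same lock held. The shape is the standard wait-for-predicate-under-lock idiom; below is a minimal userspace analogue using a pthread condvar. All names here are hypothetical, and signal interruption is ignored, unlike DRM_SPIN_WAIT_UNTIL, which can hand back an error in err.

#include <pthread.h>
#include <stdbool.h>

struct waiter {
	pthread_mutex_t lock;	/* cf. ref->tree_lock */
	pthread_cond_t cond;	/* cf. ref->tree_wq */
	bool idle;		/* cf. i915_active_is_idle(ref) */
};

static void
wait_until_idle(struct waiter *w)
{
	pthread_mutex_lock(&w->lock);
	while (!w->idle)		/* re-check predicate on each wakeup */
		pthread_cond_wait(&w->cond, &w->lock);
	pthread_mutex_unlock(&w->lock);
}

static void
mark_idle(struct waiter *w)
{
	pthread_mutex_lock(&w->lock);
	w->idle = true;			/* predicate flips under the lock */
	pthread_cond_broadcast(&w->cond); /* cf. DRM_SPIN_WAKEUP_ALL */
	pthread_mutex_unlock(&w->lock);
}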
@@ -599 +613 @@ static struct active_node *reuse_idle_ba
		goto match;
	}

+#ifdef __NetBSD__
+	{
+		struct active_node *node =
+		    rb_tree_find_node_leq(&ref->tree.rbr_tree, &idx);
+		if (node) {
+			if (node->timeline == idx && is_idle_barrier(node, idx)) {
+				p = &node->node;
+				goto match;
+			}
+			prev = &node->node;
+		} else {
+			prev = NULL;
+		}
+	}
+#else
	prev = NULL;
	p = ref->tree.rb_node;
	while (p) {
@@ -614 +643 @@ static struct active_node *reuse_idle_ba
		else
			p = p->rb_left;
	}
+#endif

	/*
	 * No quick match, but we did find the leftmost rb_node for the
@@ -621 +651 @@ static struct active_node *reuse_idle_ba
	 * any idle-barriers on this timeline that we missed, or just use
	 * the first pending barrier.
	 */
-	for (p = prev; p; p = rb_next(p)) {
+	for (p = prev; p; p = rb_next2(&ref->tree, p)) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);
		struct intel_engine_cs *engine;
@@ -712 +742 @@ int i915_active_acquire_preallocate_barr
		 * for our tracking of the pending barrier.
		 */
		RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
-		node->base.cb.node.prev = (void *)engine;
+		node->engine = engine;
		atomic_inc(&ref->count);
	}
	GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));
@@ -764 +794 @@ void i915_active_acquire_barrier(struct
		spin_lock_irqsave_nested(&ref->tree_lock, flags,
					 SINGLE_DEPTH_NESTING);
+#ifdef __NetBSD__
+		__USE(p);
+		__USE(parent);
+		struct active_node *collision __diagused;
+		collision = rb_tree_insert_node(&ref->tree.rbr_tree, node);
+		KASSERT(collision == node);
+#else
		parent = NULL;
		p = &ref->tree.rb_node;
		while (*p) {
@@ -779 +816 @@ void i915_active_acquire_barrier(struct
		}
		rb_link_node(&node->node, parent, p);
		rb_insert_color(&node->node, &ref->tree);
+#endif
		spin_unlock_irqrestore(&ref->tree_lock, flags);

		GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
@@ -814 +852 @@ void i915_request_add_active_barriers(st
	llist_for_each_safe(node, next, node) {
		/* serialise with reuse_idle_barrier */
		smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
+#ifdef __NetBSD__
+		spin_unlock(&rq->lock);
+		struct i915_active_fence *fence =
+		    container_of(node, struct i915_active_fence, llist);
+		/* XXX something bad went wrong in making this code */
+		KASSERT(fence->cb.func == node_retire);
+		(void)dma_fence_add_callback(fence->fence, &fence->cb,
+		    node_retire);
+		spin_lock(&rq->lock);
+#else
		list_add_tail((struct list_head *)node, &rq->fence.cb_list);
+#endif
	}
	spin_unlock_irqrestore(&rq->lock, flags);
 }
@@ -867 +916 @@ __i915_active_fence_set(struct i915_acti
	prev = xchg(__active_fence_slot(active), fence);
	if (prev) {
		GEM_BUG_ON(prev == fence);
+#ifdef __NetBSD__
+		KASSERT(active->cb.func == node_retire);
+		(void)dma_fence_remove_callback(prev, &active->cb);
+#else
		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
		__list_del_entry(&active->cb.node);
		spin_unlock(prev->lock); /* serialise with prev->cb_list */
+#endif
	}
	GEM_BUG_ON(rcu_access_pointer(active->fence) != fence);
+#ifndef __NetBSD__
	list_add_tail(&active->cb.node, &fence->cb_list);
+#endif
	spin_unlock_irqrestore(fence->lock, flags);

+#ifdef __NetBSD__
+	KASSERT(active->cb.func == node_retire);
+	dma_fence_add_callback(fence, &active->cb, node_retire);
+#endif
+
	return prev;
 }
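Closing note: where Linux splices active->cb directly into fence->cb_list (and unlinks it from the previous fence under prev->lock), the port goes through the dma_fence API instead, detaching with dma_fence_remove_callback() and re-attaching node_retire with dma_fence_add_callback(). Either way, attach and detach must serialise against the signalling path, which walks the callback list under the fence's lock. Below is a toy userspace model of that invariant; the types and functions are hypothetical (pthreads standing in for the fence spinlock), and unlike dma_fence_remove_callback() the toy does not report whether the callback had already been consumed.

#include <pthread.h>
#include <stdbool.h>
#include <sys/queue.h>

struct toy_fence {
	pthread_mutex_t lock;		/* cf. dma_fence::lock */
	bool signalled;
	TAILQ_HEAD(, toy_cb) cb_list;	/* cf. dma_fence::cb_list */
};

struct toy_cb {
	TAILQ_ENTRY(toy_cb) entry;
	void (*func)(struct toy_fence *, struct toy_cb *);
};

/* Attach cb unless already signalled; cf. dma_fence_add_callback(). */
static bool
toy_add_callback(struct toy_fence *f, struct toy_cb *cb,
    void (*func)(struct toy_fence *, struct toy_cb *))
{
	bool ok;

	pthread_mutex_lock(&f->lock);
	ok = !f->signalled;
	if (ok) {
		cb->func = func;
		TAILQ_INSERT_TAIL(&f->cb_list, cb, entry);
	}
	pthread_mutex_unlock(&f->lock);
	return ok;
}

/* Detach cb under the lock; cf. dma_fence_remove_callback(). */
static void
toy_remove_callback(struct toy_fence *f, struct toy_cb *cb)
{
	pthread_mutex_lock(&f->lock);
	TAILQ_REMOVE(&f->cb_list, cb, entry);
	pthread_mutex_unlock(&f->lock);
}

/* Signal: run callbacks under the same lock they were added under. */
static void
toy_signal(struct toy_fence *f)
{
	struct toy_cb *cb;

	pthread_mutex_lock(&f->lock);
	f->signalled = true;
	while ((cb = TAILQ_FIRST(&f->cb_list)) != NULL) {
		TAILQ_REMOVE(&f->cb_list, cb, entry);
		(*cb->func)(f, cb);
	}
	pthread_mutex_unlock(&f->lock);
}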
|
|