Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-only
2 : /*
3 : * fence-chain: chain fences together in a timeline
4 : *
5 : * Copyright (C) 2018 Advanced Micro Devices, Inc.
6 : * Authors:
7 : * Christian König <christian.koenig@amd.com>
8 : */
9 :
10 : #include <linux/dma-fence-chain.h>
11 :
12 : static bool dma_fence_chain_enable_signaling(struct dma_fence *fence);
13 :
14 : /**
15 : * dma_fence_chain_get_prev - use RCU to get a reference to the previous fence
16 : * @chain: chain node to get the previous node from
17 : *
18 : * Use dma_fence_get_rcu_safe to get a reference to the previous fence of the
19 : * chain node.
20 : */
21 : static struct dma_fence *dma_fence_chain_get_prev(struct dma_fence_chain *chain)
22 : {
23 : struct dma_fence *prev;
24 :
25 : rcu_read_lock();
26 0 : prev = dma_fence_get_rcu_safe(&chain->prev);
27 : rcu_read_unlock();
28 : return prev;
29 : }
30 :
/**
 * dma_fence_chain_walk - chain walking function
 * @fence: current chain node
 *
 * Walk the chain to the next node. Returns the next fence or NULL if we are at
 * the end of the chain. Garbage collects chain nodes which are already
 * signaled.
 *
 * Consumes the reference on @fence and returns a new reference (or NULL).
 */
struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence)
{
	struct dma_fence_chain *chain, *prev_chain;
	struct dma_fence *prev, *replacement, *tmp;

	chain = to_dma_fence_chain(fence);
	if (!chain) {
		/* Not a chain node: nothing to walk to. */
		dma_fence_put(fence);
		return NULL;
	}

	while ((prev = dma_fence_chain_get_prev(chain))) {

		prev_chain = to_dma_fence_chain(prev);
		if (prev_chain) {
			/* Stop at the first node whose contained fence is
			 * still pending.
			 */
			if (!dma_fence_is_signaled(prev_chain->fence))
				break;

			/* prev's contained fence signaled: skip over prev by
			 * linking directly to prev's predecessor.
			 */
			replacement = dma_fence_chain_get_prev(prev_chain);
		} else {
			/* Plain (non-chain) predecessor: stop if pending,
			 * otherwise drop it from the chain entirely.
			 */
			if (!dma_fence_is_signaled(prev))
				break;

			replacement = NULL;
		}

		/* Lockless unlink: only swing ->prev if nobody modified it
		 * concurrently; on a race drop the replacement reference we
		 * took and retry with the new ->prev.
		 */
		tmp = unrcu_pointer(cmpxchg(&chain->prev, RCU_INITIALIZER(prev),
					    RCU_INITIALIZER(replacement)));
		if (tmp == prev)
			dma_fence_put(tmp);
		else
			dma_fence_put(replacement);
		dma_fence_put(prev);
	}

	dma_fence_put(fence);
	return prev;
}
EXPORT_SYMBOL(dma_fence_chain_walk);
78 :
/**
 * dma_fence_chain_find_seqno - find fence chain node by seqno
 * @pfence: pointer to the chain node where to start
 * @seqno: the sequence number to search for
 *
 * Advance the fence pointer to the chain node which will signal this sequence
 * number. If no sequence number is provided then this is a no-op.
 *
 * Returns EINVAL if the fence is not a chain node or the sequence number has
 * not yet advanced far enough.
 */
int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno)
{
	struct dma_fence_chain *chain;

	/* seqno == 0 means "no fence needed": leave *pfence untouched. */
	if (!seqno)
		return 0;

	chain = to_dma_fence_chain(*pfence);
	if (!chain || chain->base.seqno < seqno)
		return -EINVAL;

	/* Walk backwards until we either leave this timeline (different
	 * context) or reach the first node whose predecessor already signaled
	 * before @seqno — that node is the one responsible for @seqno.
	 * The iterator leaves a reference on *pfence for the caller.
	 */
	dma_fence_chain_for_each(*pfence, &chain->base) {
		if ((*pfence)->context != chain->base.context ||
		    to_dma_fence_chain(*pfence)->prev_seqno < seqno)
			break;
	}
	/* Drop the starting node's reference taken by the iterator setup. */
	dma_fence_put(&chain->base);

	return 0;
}
EXPORT_SYMBOL(dma_fence_chain_find_seqno);
111 :
/* dma_fence_ops::get_driver_name: chain nodes are not tied to any particular
 * driver, so report a fixed generic name.
 */
static const char *dma_fence_chain_get_driver_name(struct dma_fence *fence)
{
	return "dma_fence_chain";
}
116 :
/* dma_fence_ops::get_timeline_name: chains are not bound to one timeline
 * name, so report a fixed placeholder.
 */
static const char *dma_fence_chain_get_timeline_name(struct dma_fence *fence)
{
	return "unbound";
}
121 :
/* Deferred work queued by dma_fence_chain_cb() after a contained fence
 * signaled: advance the chain or, if nothing is left pending, signal it.
 */
static void dma_fence_chain_irq_work(struct irq_work *work)
{
	struct dma_fence_chain *chain;

	chain = container_of(work, typeof(*chain), work);

	/* Try to rearm the callback */
	if (!dma_fence_chain_enable_signaling(&chain->base))
		/* Ok, we are done. No more unsignaled fences left */
		dma_fence_signal(&chain->base);
	/* Drop the chain reference taken in dma_fence_chain_enable_signaling(). */
	dma_fence_put(&chain->base);
}
134 :
/* Fence callback installed by dma_fence_chain_enable_signaling(): the fence
 * we were waiting on signaled, so punt the chain advance to irq_work context
 * (presumably to get out of the signaling fence's callback context — see
 * dma_fence_chain_irq_work()).
 */
static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct dma_fence_chain *chain;

	chain = container_of(cb, typeof(*chain), cb);
	init_irq_work(&chain->work, dma_fence_chain_irq_work);
	irq_work_queue(&chain->work);
	/* Drop the reference on @f taken when the callback was installed. */
	dma_fence_put(f);
}
144 :
/* dma_fence_ops::enable_signaling: arm dma_fence_chain_cb() on the first
 * unsignaled fence found in the chain. Returns true if a callback was armed,
 * false if every fence in the chain has already signaled.
 */
static bool dma_fence_chain_enable_signaling(struct dma_fence *fence)
{
	struct dma_fence_chain *head = to_dma_fence_chain(fence);

	/* Hold a reference on the head for the pending callback; it is
	 * dropped in dma_fence_chain_irq_work(), or below if no callback
	 * could be installed.
	 */
	dma_fence_get(&head->base);
	dma_fence_chain_for_each(fence, &head->base) {
		struct dma_fence *f = dma_fence_chain_contained(fence);

		dma_fence_get(f);
		if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) {
			/* Callback armed; the reference on @f is dropped in
			 * dma_fence_chain_cb(), the iterator reference here.
			 */
			dma_fence_put(fence);
			return true;
		}
		/* @f already signaled: drop our reference and try the next
		 * node in the chain.
		 */
		dma_fence_put(f);
	}
	dma_fence_put(&head->base);
	return false;
}
163 :
/* dma_fence_ops::signaled: a chain node counts as signaled once every fence
 * reachable through the chain has signaled.
 */
static bool dma_fence_chain_signaled(struct dma_fence *fence)
{
	dma_fence_chain_for_each(fence, fence) {
		struct dma_fence *f = dma_fence_chain_contained(fence);

		if (!dma_fence_is_signaled(f)) {
			/* Drop the iterator's reference before bailing out. */
			dma_fence_put(fence);
			return false;
		}
	}

	return true;
}
177 :
/* dma_fence_ops::release: free a chain node, iteratively unlinking its
 * predecessors first so that dropping a long chain does not recurse.
 */
static void dma_fence_chain_release(struct dma_fence *fence)
{
	struct dma_fence_chain *chain = to_dma_fence_chain(fence);
	struct dma_fence *prev;

	/* Manually unlink the chain as much as possible to avoid recursion
	 * and potential stack overflow.
	 */
	while ((prev = rcu_dereference_protected(chain->prev, true))) {
		struct dma_fence_chain *prev_chain;

		/* Somebody else still holds a reference on prev, so putting
		 * it below won't free it (and won't recurse); stop here.
		 */
		if (kref_read(&prev->refcount) > 1)
			break;

		prev_chain = to_dma_fence_chain(prev);
		if (!prev_chain)
			break;

		/* No need for atomic operations since we hold the last
		 * reference to prev_chain.
		 */
		chain->prev = prev_chain->prev;
		RCU_INIT_POINTER(prev_chain->prev, NULL);
		dma_fence_put(prev);
	}
	/* Drop whatever prev (possibly NULL) the unlink loop stopped at. */
	dma_fence_put(prev);

	dma_fence_put(chain->fence);
	dma_fence_free(fence);
}
208 :
209 :
210 0 : static void dma_fence_chain_set_deadline(struct dma_fence *fence,
211 : ktime_t deadline)
212 : {
213 0 : dma_fence_chain_for_each(fence, fence) {
214 0 : struct dma_fence *f = dma_fence_chain_contained(fence);
215 :
216 0 : dma_fence_set_deadline(f, deadline);
217 : }
218 0 : }
219 :
/* Fence operations shared by all chain nodes; chain timelines always use
 * full 64bit sequence numbers.
 */
const struct dma_fence_ops dma_fence_chain_ops = {
	.use_64bit_seqno = true,
	.get_driver_name = dma_fence_chain_get_driver_name,
	.get_timeline_name = dma_fence_chain_get_timeline_name,
	.enable_signaling = dma_fence_chain_enable_signaling,
	.signaled = dma_fence_chain_signaled,
	.release = dma_fence_chain_release,
	.set_deadline = dma_fence_chain_set_deadline,
};
EXPORT_SYMBOL(dma_fence_chain_ops);
230 :
/**
 * dma_fence_chain_init - initialize a fence chain
 * @chain: the chain node to initialize
 * @prev: the previous fence
 * @fence: the current fence
 * @seqno: the sequence number to use for the fence chain
 *
 * Initialize a new chain node and either start a new chain or add the node to
 * the existing chain of the previous fence.
 *
 * NOTE(review): the references to @prev and @fence appear to be taken over by
 * the chain (both are put in dma_fence_chain_release()) — confirm against the
 * dma-fence-chain header documentation.
 */
void dma_fence_chain_init(struct dma_fence_chain *chain,
			  struct dma_fence *prev,
			  struct dma_fence *fence,
			  uint64_t seqno)
{
	struct dma_fence_chain *prev_chain = to_dma_fence_chain(prev);
	uint64_t context;

	spin_lock_init(&chain->lock);
	rcu_assign_pointer(chain->prev, prev);
	chain->fence = fence;
	chain->prev_seqno = 0;

	/* Try to reuse the context of the previous chain node. */
	if (prev_chain && __dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
		/* @seqno advances the existing timeline: stay on it and
		 * remember where the predecessor left off.
		 */
		context = prev->context;
		chain->prev_seqno = prev->seqno;
	} else {
		/* @prev is not a chain node, or @seqno does not advance its
		 * timeline: start a fresh context for this node.
		 */
		context = dma_fence_context_alloc(1);
		/* Make sure that we always have a valid sequence number. */
		if (prev_chain)
			seqno = max(prev->seqno, seqno);
	}

	dma_fence_init(&chain->base, &dma_fence_chain_ops,
		       &chain->lock, context, seqno);

	/*
	 * Chaining dma_fence_chain container together is only allowed through
	 * the prev fence and not through the contained fence.
	 *
	 * The correct way of handling this is to flatten out the fence
	 * structure into a dma_fence_array by the caller instead.
	 */
	WARN_ON(dma_fence_is_chain(fence));
}
EXPORT_SYMBOL(dma_fence_chain_init);
|