Line data Source code
1 : /*
2 : * Copyright 2017 Red Hat
3 : * Parts ported from amdgpu (fence wait code).
4 : * Copyright 2016 Advanced Micro Devices, Inc.
5 : *
6 : * Permission is hereby granted, free of charge, to any person obtaining a
7 : * copy of this software and associated documentation files (the "Software"),
8 : * to deal in the Software without restriction, including without limitation
9 : * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 : * and/or sell copies of the Software, and to permit persons to whom the
11 : * Software is furnished to do so, subject to the following conditions:
12 : *
13 : * The above copyright notice and this permission notice (including the next
14 : * paragraph) shall be included in all copies or substantial portions of the
15 : * Software.
16 : *
17 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 : * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 : * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 : * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 : * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 : * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
23 : * IN THE SOFTWARE.
24 : *
25 : * Authors:
26 : *
27 : */
28 :
29 : /**
30 : * DOC: Overview
31 : *
32 : * DRM synchronisation objects (syncobj, see struct &drm_syncobj) provide a
33 : * container for a synchronization primitive which can be used by userspace
34 : * to explicitly synchronize GPU commands, can be shared between userspace
35 : * processes, and can be shared between different DRM drivers.
36 : * Their primary use-case is to implement Vulkan fences and semaphores.
37 : * The syncobj userspace API provides ioctls for several operations:
38 : *
39 : * - Creation and destruction of syncobjs
40 : * - Import and export of syncobjs to/from a syncobj file descriptor
41 : * - Import and export a syncobj's underlying fence to/from a sync file
42 : * - Reset a syncobj (set its fence to NULL)
43 : * - Signal a syncobj (set a trivially signaled fence)
44 : * - Wait for a syncobj's fence to appear and be signaled
45 : *
46 : * The syncobj userspace API also provides operations to manipulate a syncobj
47 : * in terms of a timeline of struct &dma_fence_chain rather than a single
48 : * struct &dma_fence, through the following operations:
49 : *
50 : * - Signal a given point on the timeline
51 : * - Wait for a given point to appear and/or be signaled
52 : * - Import and export from/to a given point of a timeline
53 : *
54 : * At its core, a syncobj is simply a wrapper around a pointer to a struct
55 : * &dma_fence which may be NULL.
56 : * When a syncobj is first created, its pointer is either NULL or a pointer
57 : * to an already signaled fence depending on whether the
58 : * &DRM_SYNCOBJ_CREATE_SIGNALED flag is passed to
59 : * &DRM_IOCTL_SYNCOBJ_CREATE.
60 : *
61 : * If the syncobj is considered as a binary (its state is either signaled or
62 : * unsignaled) primitive, when GPU work is enqueued in a DRM driver to signal
63 : * the syncobj, the syncobj's fence is replaced with a fence which will be
64 : * signaled by the completion of that work.
65 : * If the syncobj is considered as a timeline primitive, when GPU work is
66 : * enqueued in a DRM driver to signal a given point of the syncobj, a new
67 : * struct &dma_fence_chain is created, pointing to the DRM driver's fence
68 : * and also to the previous fence that was in the syncobj. The new struct
69 : * &dma_fence_chain fence replaces the syncobj's fence and will be signaled
70 : * by completion of the DRM driver's work and also any work associated with
71 : * the fence previously in the syncobj.
72 : *
73 : * When GPU work which waits on a syncobj is enqueued in a DRM driver, at the
74 : * time the work is enqueued, it waits on the syncobj's fence before
75 : * submitting the work to hardware. That fence is either:
76 : *
77 : * - The syncobj's current fence if the syncobj is considered as a binary
78 : * primitive.
79 : * - The struct &dma_fence associated with a given point if the syncobj is
80 : * considered as a timeline primitive.
81 : *
82 : * If the syncobj's fence is NULL or not present in the syncobj's timeline,
83 : * the enqueue operation is expected to fail.
84 : *
85 : * With a binary syncobj, all manipulation of the syncobj's fence happens in
86 : * terms of the current fence at the time the ioctl is called by userspace,
87 : * regardless of whether that operation is an immediate host-side operation
88 : * (signal or reset) or an operation which is enqueued in some driver
89 : * queue. &DRM_IOCTL_SYNCOBJ_RESET and &DRM_IOCTL_SYNCOBJ_SIGNAL can be used
90 : * to manipulate a syncobj from the host by resetting its pointer to NULL or
91 : * setting its pointer to a fence which is already signaled.
92 : *
93 : * With a timeline syncobj, all manipulation of the syncobj's fence happens in
94 : * terms of a u64 value referring to a point in the timeline. See
95 : * dma_fence_chain_find_seqno() to see how a given point is found in the
96 : * timeline.
97 : *
98 : * Note that applications should be careful to always use the timeline set of
99 : * ioctls when dealing with a syncobj considered as a timeline. Using the
100 : * binary set of ioctls with a syncobj considered as a timeline could result
101 : * in incorrect synchronization. The use of binary syncobjs is supported
102 : * through the timeline set of ioctls by using a point value of 0; this
103 : * reproduces the behavior of the binary set of ioctls (for example,
104 : * replacing the syncobj's fence when signaling).
105 : *
106 : *
107 : * Host-side wait on syncobjs
108 : * --------------------------
109 : *
110 : * &DRM_IOCTL_SYNCOBJ_WAIT takes an array of syncobj handles and does a
111 : * host-side wait on all of the syncobj fences simultaneously.
112 : * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL is set, the wait ioctl will wait on
113 : * all of the syncobj fences to be signaled before it returns.
114 : * Otherwise, it returns once at least one syncobj fence has been signaled
115 : * and the index of a signaled fence is written back to the client.
116 : *
117 : * Unlike the enqueued GPU work dependencies which fail if they see a NULL
118 : * fence in a syncobj, if &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is set,
119 : * the host-side wait will first wait for the syncobj to receive a non-NULL
120 : * fence and then wait on that fence.
121 : * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is not set and any one of the
122 : * syncobjs in the array has a NULL fence, -EINVAL will be returned.
123 : * Assuming the syncobj starts off with a NULL fence, this allows a client
124 : * to do a host wait in one thread (or process) which waits on GPU work
125 : * submitted in another thread (or process) without having to manually
126 : * synchronize between the two.
127 : * This requirement is inherited from the Vulkan fence API.
128 : *
129 : * Similarly, &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT takes an array of syncobj
130 : * handles as well as an array of u64 points and does a host-side wait on all
131 : * of the syncobj fences at the given points simultaneously.
132 : *
133 : * &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT also adds the ability to wait for a given
134 : * fence to materialize on the timeline without waiting for the fence to be
135 : * signaled by using the &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE flag. This
136 : * requirement is inherited from the wait-before-signal behavior required by
137 : * the Vulkan timeline semaphore API.
138 : *
139 : *
140 : * Import/export of syncobjs
141 : * -------------------------
142 : *
143 : * &DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE and &DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD
144 : * provide two mechanisms for import/export of syncobjs.
145 : *
146 : * The first lets the client import or export an entire syncobj to a file
147 : * descriptor.
148 : * These fds are opaque and have no use other than passing the
149 : * syncobj between processes.
150 : * All exported file descriptors and any syncobj handles created as a
151 : * result of importing those file descriptors own a reference to the
152 : * same underlying struct &drm_syncobj and the syncobj can be used
153 : * persistently across all the processes with which it is shared.
154 : * The syncobj is freed only once the last reference is dropped.
155 : * Unlike dma-buf, importing a syncobj creates a new handle (with its own
156 : * reference) for every import instead of de-duplicating.
157 : * The primary use-case of this persistent import/export is for shared
158 : * Vulkan fences and semaphores.
159 : *
160 : * The second import/export mechanism, which is indicated by
161 : * &DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE or
162 : * &DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE lets the client
163 : * import/export the syncobj's current fence from/to a &sync_file.
164 : * When a syncobj is exported to a sync file, that sync file wraps the
165 : * syncobj's fence at the time of export and any later signal or reset
166 : * operations on the syncobj will not affect the exported sync file.
167 : * When a sync file is imported into a syncobj, the syncobj's fence is set
168 : * to the fence wrapped by that sync file.
169 : * Because sync files are immutable, resetting or signaling the syncobj
170 : * will not affect any sync files whose fences have been imported into the
171 : * syncobj.
172 : *
173 : *
174 : * Import/export of timeline points in timeline syncobjs
175 : * -----------------------------------------------------
176 : *
177 : * &DRM_IOCTL_SYNCOBJ_TRANSFER provides a mechanism to transfer a struct
178 : * &dma_fence_chain of a syncobj at a given u64 point to another u64 point
179 : * into another syncobj.
180 : *
181 : * Note that if you want to transfer a struct &dma_fence_chain between a
182 : * given point on a timeline syncobj and a binary syncobj, you can use
183 : * point 0 to mean take/replace the fence in the binary syncobj.
184 : */
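As an illustration of the binary flow described in the overview above, here is a minimal userspace sketch (an editorial example, not part of this file). It assumes an already-open DRM device file descriptor and the UAPI definitions from <drm/drm.h>; the example_ function name is hypothetical and error handling is reduced to bare failure returns.

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Hypothetical userspace sketch: create a binary syncobj, signal it from the
 * host, then do a host-side wait on it. timeout_nsec is an absolute time on
 * the kernel's monotonic clock; 0 simply polls. */
static int example_binary_syncobj(int drm_fd)
{
        struct drm_syncobj_create create = { .flags = 0 };
        uint32_t handle;

        if (ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_CREATE, &create))
                return -1;
        handle = create.handle;

        /* Host-side signal: installs an already signaled stub fence. */
        struct drm_syncobj_array signal = {
                .handles = (uintptr_t)&handle,
                .count_handles = 1,
        };
        if (ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &signal))
                return -1;

        /* Host-side wait: returns immediately since the fence is signaled. */
        struct drm_syncobj_wait wait = {
                .handles = (uintptr_t)&handle,
                .count_handles = 1,
                .timeout_nsec = 0,
                .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL,
        };
        return ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
}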
185 :
186 : #include <linux/anon_inodes.h>
187 : #include <linux/dma-fence-unwrap.h>
188 : #include <linux/file.h>
189 : #include <linux/fs.h>
190 : #include <linux/sched/signal.h>
191 : #include <linux/sync_file.h>
192 : #include <linux/uaccess.h>
193 :
194 : #include <drm/drm.h>
195 : #include <drm/drm_drv.h>
196 : #include <drm/drm_file.h>
197 : #include <drm/drm_gem.h>
198 : #include <drm/drm_print.h>
199 : #include <drm/drm_syncobj.h>
200 : #include <drm/drm_utils.h>
201 :
202 : #include "drm_internal.h"
203 :
204 : struct syncobj_wait_entry {
205 : struct list_head node;
206 : struct task_struct *task;
207 : struct dma_fence *fence;
208 : struct dma_fence_cb fence_cb;
209 : u64 point;
210 : };
211 :
212 : static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
213 : struct syncobj_wait_entry *wait);
214 :
215 : /**
216 : * drm_syncobj_find - lookup and reference a sync object.
217 : * @file_private: drm file private pointer
218 : * @handle: sync object handle to lookup.
219 : *
220 : * Returns a reference to the syncobj pointed to by handle or NULL. The
221 : * reference must be released by calling drm_syncobj_put().
222 : */
223 0 : struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
224 : u32 handle)
225 : {
226 : struct drm_syncobj *syncobj;
227 :
228 0 : spin_lock(&file_private->syncobj_table_lock);
229 :
230 : /* Check if we currently have a reference on the object */
231 0 : syncobj = idr_find(&file_private->syncobj_idr, handle);
232 0 : if (syncobj)
233 : drm_syncobj_get(syncobj);
234 :
235 0 : spin_unlock(&file_private->syncobj_table_lock);
236 :
237 0 : return syncobj;
238 : }
239 : EXPORT_SYMBOL(drm_syncobj_find);
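A hedged driver-side sketch of the lookup/put pairing described in the comment above; example_driver_lookup is a hypothetical function that would live in a driver translation unit including <drm/drm_syncobj.h>, not in this file.

/* Hypothetical driver sketch: look up a syncobj handle passed in by
 * userspace and drop the reference when done with it. */
static int example_driver_lookup(struct drm_file *file_priv, u32 handle)
{
        struct drm_syncobj *syncobj = drm_syncobj_find(file_priv, handle);

        if (!syncobj)
                return -ENOENT;

        /* ... use the syncobj, e.g. peek at its fence with
         * drm_syncobj_fence_get() / dma_fence_put() ...
         */

        drm_syncobj_put(syncobj);
        return 0;
}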
240 :
241 0 : static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
242 : struct syncobj_wait_entry *wait)
243 : {
244 : struct dma_fence *fence;
245 :
246 0 : if (wait->fence)
247 0 : return;
248 :
249 0 : spin_lock(&syncobj->lock);
250 : /* We've already tried once to get a fence and failed. Now that we
251 : * have the lock, try one more time just to be sure we don't add a
252 : * callback when a fence has already been set.
253 : */
254 0 : fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
255 0 : if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
256 0 : dma_fence_put(fence);
257 0 : list_add_tail(&wait->node, &syncobj->cb_list);
258 0 : } else if (!fence) {
259 0 : wait->fence = dma_fence_get_stub();
260 : } else {
261 0 : wait->fence = fence;
262 : }
263 0 : spin_unlock(&syncobj->lock);
264 : }
265 :
266 : static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
267 : struct syncobj_wait_entry *wait)
268 : {
269 0 : if (!wait->node.next)
270 : return;
271 :
272 0 : spin_lock(&syncobj->lock);
273 0 : list_del_init(&wait->node);
274 0 : spin_unlock(&syncobj->lock);
275 : }
276 :
277 : /**
278 : * drm_syncobj_add_point - add new timeline point to the syncobj
279 : * @syncobj: sync object to add the timeline point to
280 : * @chain: chain node to use to add the point
281 : * @fence: fence to encapsulate in the chain node
282 : * @point: sequence number to use for the point
283 : *
284 : * Add the chain node as new timeline point to the syncobj.
285 : */
286 0 : void drm_syncobj_add_point(struct drm_syncobj *syncobj,
287 : struct dma_fence_chain *chain,
288 : struct dma_fence *fence,
289 : uint64_t point)
290 : {
291 : struct syncobj_wait_entry *cur, *tmp;
292 : struct dma_fence *prev;
293 :
294 0 : dma_fence_get(fence);
295 :
296 0 : spin_lock(&syncobj->lock);
297 :
298 0 : prev = drm_syncobj_fence_get(syncobj);
299 : /* Adding an unordered point to the timeline could cause the payload returned from the query ioctl to be 0! */
300 0 : if (prev && prev->seqno >= point)
301 0 : DRM_DEBUG("You are adding an unordered point to the timeline!\n");
302 0 : dma_fence_chain_init(chain, prev, fence, point);
303 0 : rcu_assign_pointer(syncobj->fence, &chain->base);
304 :
305 0 : list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
306 0 : syncobj_wait_syncobj_func(syncobj, cur);
307 0 : spin_unlock(&syncobj->lock);
308 :
309 : /* Walk the chain once to trigger garbage collection */
310 0 : dma_fence_chain_for_each(fence, prev);
311 0 : dma_fence_put(prev);
312 0 : }
313 : EXPORT_SYMBOL(drm_syncobj_add_point);
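A hedged driver-side sketch of how a chain node is typically allocated and handed to drm_syncobj_add_point(); the example_ function is hypothetical and not part of this file.

/* Hypothetical driver sketch: attach a job's fence to timeline point @point. */
static int example_add_timeline_point(struct drm_syncobj *syncobj,
                                      struct dma_fence *job_fence, u64 point)
{
        struct dma_fence_chain *chain = dma_fence_chain_alloc();

        if (!chain)
                return -ENOMEM;

        /* drm_syncobj_add_point() takes its own reference on @job_fence and
         * consumes @chain, which becomes the syncobj's new fence. */
        drm_syncobj_add_point(syncobj, chain, job_fence, point);
        return 0;
}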
314 :
315 : /**
316 : * drm_syncobj_replace_fence - replace fence in a sync object.
317 : * @syncobj: Sync object to replace fence in
318 : * @fence: fence to install in the sync object.
319 : *
320 : * This replaces the fence on a sync object.
321 : */
322 0 : void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
323 : struct dma_fence *fence)
324 : {
325 : struct dma_fence *old_fence;
326 : struct syncobj_wait_entry *cur, *tmp;
327 :
328 0 : if (fence)
329 : dma_fence_get(fence);
330 :
331 0 : spin_lock(&syncobj->lock);
332 :
333 0 : old_fence = rcu_dereference_protected(syncobj->fence,
334 : lockdep_is_held(&syncobj->lock));
335 0 : rcu_assign_pointer(syncobj->fence, fence);
336 :
337 0 : if (fence != old_fence) {
338 0 : list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
339 0 : syncobj_wait_syncobj_func(syncobj, cur);
340 : }
341 :
342 0 : spin_unlock(&syncobj->lock);
343 :
344 0 : dma_fence_put(old_fence);
345 0 : }
346 : EXPORT_SYMBOL(drm_syncobj_replace_fence);
347 :
348 : /**
349 : * drm_syncobj_assign_null_handle - assign a stub fence to the sync object
350 : * @syncobj: sync object to assign the fence on
351 : *
352 : * Assign an already signaled stub fence to the sync object.
353 : */
354 0 : static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
355 : {
356 0 : struct dma_fence *fence = dma_fence_allocate_private_stub();
357 :
358 0 : if (IS_ERR(fence))
359 0 : return PTR_ERR(fence);
360 :
361 0 : drm_syncobj_replace_fence(syncobj, fence);
362 : dma_fence_put(fence);
363 : return 0;
364 : }
365 :
366 : /* 5s default for wait submission */
367 : #define DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT 5000000000ULL
368 : /**
369 : * drm_syncobj_find_fence - lookup and reference the fence in a sync object
370 : * @file_private: drm file private pointer
371 : * @handle: sync object handle to lookup.
372 : * @point: timeline point
373 : * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or not
374 : * @fence: out parameter for the fence
375 : *
376 : * This is just a convenience function that combines drm_syncobj_find() and
377 : * drm_syncobj_fence_get().
378 : *
379 : * Returns 0 on success or a negative error value on failure. On success @fence
380 : * contains a reference to the fence, which must be released by calling
381 : * dma_fence_put().
382 : */
383 0 : int drm_syncobj_find_fence(struct drm_file *file_private,
384 : u32 handle, u64 point, u64 flags,
385 : struct dma_fence **fence)
386 : {
387 0 : struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
388 : struct syncobj_wait_entry wait;
389 0 : u64 timeout = nsecs_to_jiffies64(DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT);
390 : int ret;
391 :
392 0 : if (!syncobj)
393 : return -ENOENT;
394 :
395 : /* Waiting for userspace while holding locks is illegal because it can
396 : * trivially deadlock with page faults, for example. Make lockdep complain
397 : * about it early on.
398 : */
399 0 : if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
400 : might_sleep();
401 : lockdep_assert_none_held_once();
402 : }
403 :
404 0 : *fence = drm_syncobj_fence_get(syncobj);
405 :
406 0 : if (*fence) {
407 0 : ret = dma_fence_chain_find_seqno(fence, point);
408 0 : if (!ret) {
409 : /* If the requested seqno is already signaled
410 : * drm_syncobj_find_fence may return a NULL
411 : * fence. To make sure the recipient gets
412 : * signalled, use a new fence instead.
413 : */
414 0 : if (!*fence)
415 0 : *fence = dma_fence_get_stub();
416 :
417 : goto out;
418 : }
419 0 : dma_fence_put(*fence);
420 : } else {
421 : ret = -EINVAL;
422 : }
423 :
424 0 : if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
425 : goto out;
426 :
427 0 : memset(&wait, 0, sizeof(wait));
428 0 : wait.task = current;
429 0 : wait.point = point;
430 0 : drm_syncobj_fence_add_wait(syncobj, &wait);
431 :
432 : do {
433 0 : set_current_state(TASK_INTERRUPTIBLE);
434 0 : if (wait.fence) {
435 : ret = 0;
436 : break;
437 : }
438 0 : if (timeout == 0) {
439 : ret = -ETIME;
440 : break;
441 : }
442 :
443 0 : if (signal_pending(current)) {
444 : ret = -ERESTARTSYS;
445 : break;
446 : }
447 :
448 0 : timeout = schedule_timeout(timeout);
449 : } while (1);
450 :
451 0 : __set_current_state(TASK_RUNNING);
452 0 : *fence = wait.fence;
453 :
454 0 : if (wait.node.next)
455 0 : drm_syncobj_remove_wait(syncobj, &wait);
456 :
457 : out:
458 0 : drm_syncobj_put(syncobj);
459 :
460 0 : return ret;
461 : }
462 : EXPORT_SYMBOL(drm_syncobj_find_fence);
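A hedged driver-side sketch of resolving a userspace wait dependency into a struct dma_fence with drm_syncobj_find_fence(); the example_ function is hypothetical.

/* Hypothetical driver sketch: resolve a wait dependency on (handle, point).
 * With WAIT_FOR_SUBMIT the call may block (bounded by the 5s default above)
 * until a fence materializes; the returned fence must be dma_fence_put() by
 * the caller once the dependency has been consumed. */
static int example_resolve_wait(struct drm_file *file_priv, u32 handle,
                                u64 point, struct dma_fence **fence)
{
        return drm_syncobj_find_fence(file_priv, handle, point,
                                      DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
                                      fence);
}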
463 :
464 : /**
465 : * drm_syncobj_free - free a sync object.
466 : * @kref: kref to free.
467 : *
468 : * Only to be called from kref_put in drm_syncobj_put.
469 : */
470 0 : void drm_syncobj_free(struct kref *kref)
471 : {
472 0 : struct drm_syncobj *syncobj = container_of(kref,
473 : struct drm_syncobj,
474 : refcount);
475 0 : drm_syncobj_replace_fence(syncobj, NULL);
476 0 : kfree(syncobj);
477 0 : }
478 : EXPORT_SYMBOL(drm_syncobj_free);
479 :
480 : /**
481 : * drm_syncobj_create - create a new syncobj
482 : * @out_syncobj: returned syncobj
483 : * @flags: DRM_SYNCOBJ_* flags
484 : * @fence: if non-NULL, the syncobj will represent this fence
485 : *
486 : * This is the first function to create a sync object. After creating, drivers
487 : * probably want to make it available to userspace, either through
488 : * drm_syncobj_get_handle() or drm_syncobj_get_fd().
489 : *
490 : * Returns 0 on success or a negative error value on failure.
491 : */
492 0 : int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
493 : struct dma_fence *fence)
494 : {
495 : int ret;
496 : struct drm_syncobj *syncobj;
497 :
498 0 : syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
499 0 : if (!syncobj)
500 : return -ENOMEM;
501 :
502 0 : kref_init(&syncobj->refcount);
503 0 : INIT_LIST_HEAD(&syncobj->cb_list);
504 0 : spin_lock_init(&syncobj->lock);
505 :
506 0 : if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
507 0 : ret = drm_syncobj_assign_null_handle(syncobj);
508 0 : if (ret < 0) {
509 0 : drm_syncobj_put(syncobj);
510 0 : return ret;
511 : }
512 : }
513 :
514 0 : if (fence)
515 0 : drm_syncobj_replace_fence(syncobj, fence);
516 :
517 0 : *out_syncobj = syncobj;
518 0 : return 0;
519 : }
520 : EXPORT_SYMBOL(drm_syncobj_create);
521 :
522 : /**
523 : * drm_syncobj_get_handle - get a handle from a syncobj
524 : * @file_private: drm file private pointer
525 : * @syncobj: Sync object to export
526 : * @handle: out parameter with the new handle
527 : *
528 : * Exports a sync object created with drm_syncobj_create() as a handle on
529 : * @file_private to userspace.
530 : *
531 : * Returns 0 on success or a negative error value on failure.
532 : */
533 0 : int drm_syncobj_get_handle(struct drm_file *file_private,
534 : struct drm_syncobj *syncobj, u32 *handle)
535 : {
536 : int ret;
537 :
538 : /* take a reference to put in the idr */
539 0 : drm_syncobj_get(syncobj);
540 :
541 0 : idr_preload(GFP_KERNEL);
542 0 : spin_lock(&file_private->syncobj_table_lock);
543 0 : ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
544 0 : spin_unlock(&file_private->syncobj_table_lock);
545 :
546 : idr_preload_end();
547 :
548 0 : if (ret < 0) {
549 0 : drm_syncobj_put(syncobj);
550 0 : return ret;
551 : }
552 :
553 0 : *handle = ret;
554 0 : return 0;
555 : }
556 : EXPORT_SYMBOL(drm_syncobj_get_handle);
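A hedged driver-side sketch combining drm_syncobj_create() and drm_syncobj_get_handle(), mirroring the internal drm_syncobj_create_as_handle() helper below; the example_ function is hypothetical.

/* Hypothetical driver sketch: create an already signaled syncobj and expose
 * it to userspace as a handle. */
static int example_create_signaled_handle(struct drm_file *file_priv,
                                          u32 *handle)
{
        struct drm_syncobj *syncobj;
        int ret;

        ret = drm_syncobj_create(&syncobj, DRM_SYNCOBJ_CREATE_SIGNALED, NULL);
        if (ret)
                return ret;

        /* The handle holds its own reference, so drop the creation reference. */
        ret = drm_syncobj_get_handle(file_priv, syncobj, handle);
        drm_syncobj_put(syncobj);
        return ret;
}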
557 :
558 0 : static int drm_syncobj_create_as_handle(struct drm_file *file_private,
559 : u32 *handle, uint32_t flags)
560 : {
561 : int ret;
562 : struct drm_syncobj *syncobj;
563 :
564 0 : ret = drm_syncobj_create(&syncobj, flags, NULL);
565 0 : if (ret)
566 : return ret;
567 :
568 0 : ret = drm_syncobj_get_handle(file_private, syncobj, handle);
569 0 : drm_syncobj_put(syncobj);
570 0 : return ret;
571 : }
572 :
573 0 : static int drm_syncobj_destroy(struct drm_file *file_private,
574 : u32 handle)
575 : {
576 : struct drm_syncobj *syncobj;
577 :
578 0 : spin_lock(&file_private->syncobj_table_lock);
579 0 : syncobj = idr_remove(&file_private->syncobj_idr, handle);
580 0 : spin_unlock(&file_private->syncobj_table_lock);
581 :
582 0 : if (!syncobj)
583 : return -EINVAL;
584 :
585 0 : drm_syncobj_put(syncobj);
586 0 : return 0;
587 : }
588 :
589 0 : static int drm_syncobj_file_release(struct inode *inode, struct file *file)
590 : {
591 0 : struct drm_syncobj *syncobj = file->private_data;
592 :
593 0 : drm_syncobj_put(syncobj);
594 0 : return 0;
595 : }
596 :
597 : static const struct file_operations drm_syncobj_file_fops = {
598 : .release = drm_syncobj_file_release,
599 : };
600 :
601 : /**
602 : * drm_syncobj_get_fd - get a file descriptor from a syncobj
603 : * @syncobj: Sync object to export
604 : * @p_fd: out parameter with the new file descriptor
605 : *
606 : * Exports a sync object created with drm_syncobj_create() as a file descriptor.
607 : *
608 : * Returns 0 on success or a negative error value on failure.
609 : */
610 0 : int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
611 : {
612 : struct file *file;
613 : int fd;
614 :
615 0 : fd = get_unused_fd_flags(O_CLOEXEC);
616 0 : if (fd < 0)
617 : return fd;
618 :
619 0 : file = anon_inode_getfile("syncobj_file",
620 : &drm_syncobj_file_fops,
621 : syncobj, 0);
622 0 : if (IS_ERR(file)) {
623 0 : put_unused_fd(fd);
624 0 : return PTR_ERR(file);
625 : }
626 :
627 0 : drm_syncobj_get(syncobj);
628 0 : fd_install(fd, file);
629 :
630 0 : *p_fd = fd;
631 0 : return 0;
632 : }
633 : EXPORT_SYMBOL(drm_syncobj_get_fd);
634 :
635 0 : static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
636 : u32 handle, int *p_fd)
637 : {
638 0 : struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
639 : int ret;
640 :
641 0 : if (!syncobj)
642 : return -EINVAL;
643 :
644 0 : ret = drm_syncobj_get_fd(syncobj, p_fd);
645 0 : drm_syncobj_put(syncobj);
646 0 : return ret;
647 : }
648 :
649 0 : static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
650 : int fd, u32 *handle)
651 : {
652 : struct drm_syncobj *syncobj;
653 0 : struct fd f = fdget(fd);
654 : int ret;
655 :
656 0 : if (!f.file)
657 : return -EINVAL;
658 :
659 0 : if (f.file->f_op != &drm_syncobj_file_fops) {
660 0 : fdput(f);
661 : return -EINVAL;
662 : }
663 :
664 : /* take a reference to put in the idr */
665 0 : syncobj = f.file->private_data;
666 0 : drm_syncobj_get(syncobj);
667 :
668 0 : idr_preload(GFP_KERNEL);
669 0 : spin_lock(&file_private->syncobj_table_lock);
670 0 : ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
671 0 : spin_unlock(&file_private->syncobj_table_lock);
672 : idr_preload_end();
673 :
674 0 : if (ret > 0) {
675 0 : *handle = ret;
676 0 : ret = 0;
677 : } else
678 : drm_syncobj_put(syncobj);
679 :
680 0 : fdput(f);
681 : return ret;
682 : }
683 :
684 0 : static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
685 : int fd, int handle)
686 : {
687 0 : struct dma_fence *fence = sync_file_get_fence(fd);
688 : struct drm_syncobj *syncobj;
689 :
690 0 : if (!fence)
691 : return -EINVAL;
692 :
693 0 : syncobj = drm_syncobj_find(file_private, handle);
694 0 : if (!syncobj) {
695 0 : dma_fence_put(fence);
696 0 : return -ENOENT;
697 : }
698 :
699 0 : drm_syncobj_replace_fence(syncobj, fence);
700 0 : dma_fence_put(fence);
701 0 : drm_syncobj_put(syncobj);
702 0 : return 0;
703 : }
704 :
705 0 : static int drm_syncobj_export_sync_file(struct drm_file *file_private,
706 : int handle, int *p_fd)
707 : {
708 : int ret;
709 : struct dma_fence *fence;
710 : struct sync_file *sync_file;
711 0 : int fd = get_unused_fd_flags(O_CLOEXEC);
712 :
713 0 : if (fd < 0)
714 : return fd;
715 :
716 0 : ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
717 0 : if (ret)
718 : goto err_put_fd;
719 :
720 0 : sync_file = sync_file_create(fence);
721 :
722 0 : dma_fence_put(fence);
723 :
724 0 : if (!sync_file) {
725 : ret = -EINVAL;
726 : goto err_put_fd;
727 : }
728 :
729 0 : fd_install(fd, sync_file->file);
730 :
731 0 : *p_fd = fd;
732 0 : return 0;
733 : err_put_fd:
734 0 : put_unused_fd(fd);
735 0 : return ret;
736 : }
737 : /**
738 : * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
739 : * @file_private: drm file-private structure to set up
740 : *
741 : * Called at device open time, sets up the structure for handling refcounting
742 : * of sync objects.
743 : */
744 : void
745 0 : drm_syncobj_open(struct drm_file *file_private)
746 : {
747 0 : idr_init_base(&file_private->syncobj_idr, 1);
748 0 : spin_lock_init(&file_private->syncobj_table_lock);
749 0 : }
750 :
751 : static int
752 0 : drm_syncobj_release_handle(int id, void *ptr, void *data)
753 : {
754 0 : struct drm_syncobj *syncobj = ptr;
755 :
756 0 : drm_syncobj_put(syncobj);
757 0 : return 0;
758 : }
759 :
760 : /**
761 : * drm_syncobj_release - release file-private sync object resources
762 : * @file_private: drm file-private structure to clean up
763 : *
764 : * Called at close time when the filp is going away.
765 : *
766 : * Releases any remaining references on objects by this filp.
767 : */
768 : void
769 0 : drm_syncobj_release(struct drm_file *file_private)
770 : {
771 0 : idr_for_each(&file_private->syncobj_idr,
772 : &drm_syncobj_release_handle, file_private);
773 0 : idr_destroy(&file_private->syncobj_idr);
774 0 : }
775 :
776 : int
777 0 : drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
778 : struct drm_file *file_private)
779 : {
780 0 : struct drm_syncobj_create *args = data;
781 :
782 0 : if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
783 : return -EOPNOTSUPP;
784 :
785 : /* no valid flags yet */
786 0 : if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
787 : return -EINVAL;
788 :
789 0 : return drm_syncobj_create_as_handle(file_private,
790 0 : &args->handle, args->flags);
791 : }
792 :
793 : int
794 0 : drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
795 : struct drm_file *file_private)
796 : {
797 0 : struct drm_syncobj_destroy *args = data;
798 :
799 0 : if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
800 : return -EOPNOTSUPP;
801 :
802 : /* make sure padding is empty */
803 0 : if (args->pad)
804 : return -EINVAL;
805 0 : return drm_syncobj_destroy(file_private, args->handle);
806 : }
807 :
808 : int
809 0 : drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
810 : struct drm_file *file_private)
811 : {
812 0 : struct drm_syncobj_handle *args = data;
813 :
814 0 : if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
815 : return -EOPNOTSUPP;
816 :
817 0 : if (args->pad)
818 : return -EINVAL;
819 :
820 0 : if (args->flags != 0 &&
821 : args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
822 : return -EINVAL;
823 :
824 0 : if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
825 0 : return drm_syncobj_export_sync_file(file_private, args->handle,
826 0 : &args->fd);
827 :
828 0 : return drm_syncobj_handle_to_fd(file_private, args->handle,
829 0 : &args->fd);
830 : }
831 :
832 : int
833 0 : drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
834 : struct drm_file *file_private)
835 : {
836 0 : struct drm_syncobj_handle *args = data;
837 :
838 0 : if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
839 : return -EOPNOTSUPP;
840 :
841 0 : if (args->pad)
842 : return -EINVAL;
843 :
844 0 : if (args->flags != 0 &&
845 : args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
846 : return -EINVAL;
847 :
848 0 : if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
849 0 : return drm_syncobj_import_sync_file_fence(file_private,
850 : args->fd,
851 0 : args->handle);
852 :
853 0 : return drm_syncobj_fd_to_handle(file_private, args->fd,
854 0 : &args->handle);
855 : }
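A hedged userspace sketch of the sync-file import/export path served by the two ioctls above; it assumes <drm/drm.h>, <stdint.h>, <sys/ioctl.h> and an open DRM device fd, and the example_ function names are hypothetical.

/* Hypothetical sketch: export the syncobj's current fence as a sync_file fd. */
static int example_export_sync_file(int drm_fd, uint32_t handle, int *out_fd)
{
        struct drm_syncobj_handle args = {
                .handle = handle,
                .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
        };

        if (ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args))
                return -1;
        *out_fd = args.fd;
        return 0;
}

/* Hypothetical sketch: replace the syncobj's fence with a sync_file's fence. */
static int example_import_sync_file(int drm_fd, uint32_t handle, int sync_fd)
{
        struct drm_syncobj_handle args = {
                .handle = handle,
                .fd = sync_fd,
                .flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE,
        };

        return ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
}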
856 :
857 0 : static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
858 : struct drm_syncobj_transfer *args)
859 : {
860 0 : struct drm_syncobj *timeline_syncobj = NULL;
861 : struct dma_fence *fence, *tmp;
862 : struct dma_fence_chain *chain;
863 : int ret;
864 :
865 0 : timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle);
866 0 : if (!timeline_syncobj) {
867 : return -ENOENT;
868 : }
869 0 : ret = drm_syncobj_find_fence(file_private, args->src_handle,
870 0 : args->src_point, args->flags,
871 : &tmp);
872 0 : if (ret)
873 : goto err_put_timeline;
874 :
875 0 : fence = dma_fence_unwrap_merge(tmp);
876 0 : dma_fence_put(tmp);
877 0 : if (!fence) {
878 : ret = -ENOMEM;
879 : goto err_put_timeline;
880 : }
881 :
882 0 : chain = dma_fence_chain_alloc();
883 0 : if (!chain) {
884 : ret = -ENOMEM;
885 : goto err_free_fence;
886 : }
887 :
888 0 : drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point);
889 : err_free_fence:
890 : dma_fence_put(fence);
891 : err_put_timeline:
892 0 : drm_syncobj_put(timeline_syncobj);
893 :
894 0 : return ret;
895 : }
896 :
897 : static int
898 0 : drm_syncobj_transfer_to_binary(struct drm_file *file_private,
899 : struct drm_syncobj_transfer *args)
900 : {
901 0 : struct drm_syncobj *binary_syncobj = NULL;
902 : struct dma_fence *fence;
903 : int ret;
904 :
905 0 : binary_syncobj = drm_syncobj_find(file_private, args->dst_handle);
906 0 : if (!binary_syncobj)
907 : return -ENOENT;
908 0 : ret = drm_syncobj_find_fence(file_private, args->src_handle,
909 0 : args->src_point, args->flags, &fence);
910 0 : if (ret)
911 : goto err;
912 0 : drm_syncobj_replace_fence(binary_syncobj, fence);
913 0 : dma_fence_put(fence);
914 : err:
915 0 : drm_syncobj_put(binary_syncobj);
916 :
917 0 : return ret;
918 : }
919 : int
920 0 : drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data,
921 : struct drm_file *file_private)
922 : {
923 0 : struct drm_syncobj_transfer *args = data;
924 : int ret;
925 :
926 0 : if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
927 : return -EOPNOTSUPP;
928 :
929 0 : if (args->pad)
930 : return -EINVAL;
931 :
932 0 : if (args->dst_point)
933 0 : ret = drm_syncobj_transfer_to_timeline(file_private, args);
934 : else
935 0 : ret = drm_syncobj_transfer_to_binary(file_private, args);
936 :
937 : return ret;
938 : }
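A hedged userspace sketch of DRM_IOCTL_SYNCOBJ_TRANSFER as implemented above, where point 0 addresses a binary syncobj. It assumes the same includes as the earlier userspace sketches; the example_ function is hypothetical.

/* Hypothetical sketch: copy the fence at src_point of one syncobj to
 * dst_point of another syncobj. */
static int example_transfer(int drm_fd, uint32_t src, uint64_t src_point,
                            uint32_t dst, uint64_t dst_point)
{
        struct drm_syncobj_transfer args = {
                .src_handle = src,
                .dst_handle = dst,
                .src_point = src_point,
                .dst_point = dst_point,
        };

        return ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_TRANSFER, &args);
}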
939 :
940 0 : static void syncobj_wait_fence_func(struct dma_fence *fence,
941 : struct dma_fence_cb *cb)
942 : {
943 0 : struct syncobj_wait_entry *wait =
944 0 : container_of(cb, struct syncobj_wait_entry, fence_cb);
945 :
946 0 : wake_up_process(wait->task);
947 0 : }
948 :
949 0 : static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
950 : struct syncobj_wait_entry *wait)
951 : {
952 : struct dma_fence *fence;
953 :
954 : /* This happens inside the syncobj lock */
955 0 : fence = rcu_dereference_protected(syncobj->fence,
956 : lockdep_is_held(&syncobj->lock));
957 0 : dma_fence_get(fence);
958 0 : if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
959 0 : dma_fence_put(fence);
960 0 : return;
961 0 : } else if (!fence) {
962 0 : wait->fence = dma_fence_get_stub();
963 : } else {
964 0 : wait->fence = fence;
965 : }
966 :
967 0 : wake_up_process(wait->task);
968 0 : list_del_init(&wait->node);
969 : }
970 :
971 0 : static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
972 : void __user *user_points,
973 : uint32_t count,
974 : uint32_t flags,
975 : signed long timeout,
976 : uint32_t *idx)
977 : {
978 : struct syncobj_wait_entry *entries;
979 : struct dma_fence *fence;
980 : uint64_t *points;
981 : uint32_t signaled_count, i;
982 :
983 0 : if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)
984 : lockdep_assert_none_held_once();
985 :
986 0 : points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
987 0 : if (points == NULL)
988 : return -ENOMEM;
989 :
990 0 : if (!user_points) {
991 0 : memset(points, 0, count * sizeof(uint64_t));
992 :
993 0 : } else if (copy_from_user(points, user_points,
994 : sizeof(uint64_t) * count)) {
995 : timeout = -EFAULT;
996 : goto err_free_points;
997 : }
998 :
999 0 : entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
1000 0 : if (!entries) {
1001 : timeout = -ENOMEM;
1002 : goto err_free_points;
1003 : }
1004 : /* Walk the list of sync objects and initialize entries. We do
1005 : * this up-front so that we can properly return -EINVAL if there is
1006 : * a syncobj with a missing fence and then never have the chance of
1007 : * returning -EINVAL again.
1008 : */
1009 : signaled_count = 0;
1010 0 : for (i = 0; i < count; ++i) {
1011 : struct dma_fence *fence;
1012 :
1013 0 : entries[i].task = current;
1014 0 : entries[i].point = points[i];
1015 0 : fence = drm_syncobj_fence_get(syncobjs[i]);
1016 0 : if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
1017 0 : dma_fence_put(fence);
1018 0 : if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
1019 0 : continue;
1020 : } else {
1021 0 : timeout = -EINVAL;
1022 0 : goto cleanup_entries;
1023 : }
1024 : }
1025 :
1026 0 : if (fence)
1027 0 : entries[i].fence = fence;
1028 : else
1029 0 : entries[i].fence = dma_fence_get_stub();
1030 :
1031 0 : if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
1032 0 : dma_fence_is_signaled(entries[i].fence)) {
1033 0 : if (signaled_count == 0 && idx)
1034 0 : *idx = i;
1035 0 : signaled_count++;
1036 : }
1037 : }
1038 :
1039 0 : if (signaled_count == count ||
1040 0 : (signaled_count > 0 &&
1041 0 : !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
1042 : goto cleanup_entries;
1043 :
1044 : /* There's a very annoying laxness in the dma_fence API here, in
1045 : * that backends are not required to automatically report when a
1046 : * fence is signaled prior to fence->ops->enable_signaling() being
1047 : * called. So here if we fail to match signaled_count, we need to
1048 : * fall through and try a 0 timeout wait!
1049 : */
1050 :
1051 0 : if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
1052 0 : for (i = 0; i < count; ++i)
1053 0 : drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
1054 : }
1055 :
1056 : do {
1057 0 : set_current_state(TASK_INTERRUPTIBLE);
1058 :
1059 0 : signaled_count = 0;
1060 0 : for (i = 0; i < count; ++i) {
1061 0 : fence = entries[i].fence;
1062 0 : if (!fence)
1063 0 : continue;
1064 :
1065 0 : if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
1066 0 : dma_fence_is_signaled(fence) ||
1067 0 : (!entries[i].fence_cb.func &&
1068 0 : dma_fence_add_callback(fence,
1069 : &entries[i].fence_cb,
1070 : syncobj_wait_fence_func))) {
1071 : /* The fence has been signaled */
1072 0 : if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
1073 0 : signaled_count++;
1074 : } else {
1075 0 : if (idx)
1076 0 : *idx = i;
1077 : goto done_waiting;
1078 : }
1079 : }
1080 : }
1081 :
1082 0 : if (signaled_count == count)
1083 : goto done_waiting;
1084 :
1085 0 : if (timeout == 0) {
1086 : timeout = -ETIME;
1087 : goto done_waiting;
1088 : }
1089 :
1090 0 : if (signal_pending(current)) {
1091 : timeout = -ERESTARTSYS;
1092 : goto done_waiting;
1093 : }
1094 :
1095 0 : timeout = schedule_timeout(timeout);
1096 : } while (1);
1097 :
1098 : done_waiting:
1099 0 : __set_current_state(TASK_RUNNING);
1100 :
1101 : cleanup_entries:
1102 0 : for (i = 0; i < count; ++i) {
1103 0 : drm_syncobj_remove_wait(syncobjs[i], &entries[i]);
1104 0 : if (entries[i].fence_cb.func)
1105 0 : dma_fence_remove_callback(entries[i].fence,
1106 : &entries[i].fence_cb);
1107 0 : dma_fence_put(entries[i].fence);
1108 : }
1109 0 : kfree(entries);
1110 :
1111 : err_free_points:
1112 0 : kfree(points);
1113 :
1114 0 : return timeout;
1115 : }
1116 :
1117 : /**
1118 : * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
1119 : *
1120 : * @timeout_nsec: absolute timeout in nanoseconds, 0 for poll
1121 : *
1122 : * Calculate the timeout in jiffies from an absolute time in sec/nsec.
1123 : */
1124 0 : signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
1125 : {
1126 : ktime_t abs_timeout, now;
1127 : u64 timeout_ns, timeout_jiffies64;
1128 :
1129 : /* a timeout of 0 means poll - absolute 0 doesn't seem valid */
1130 0 : if (timeout_nsec == 0)
1131 : return 0;
1132 :
1133 0 : abs_timeout = ns_to_ktime(timeout_nsec);
1134 0 : now = ktime_get();
1135 :
1136 0 : if (!ktime_after(abs_timeout, now))
1137 : return 0;
1138 :
1139 0 : timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));
1140 :
1141 0 : timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
1142 : /* clamp timeout to avoid infinite timeout */
1143 0 : if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
1144 : return MAX_SCHEDULE_TIMEOUT - 1;
1145 :
1146 0 : return timeout_jiffies64 + 1;
1147 : }
1148 : EXPORT_SYMBOL(drm_timeout_abs_to_jiffies);
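Because the wait ioctls pass an absolute timeout that is converted here against the kernel's monotonic clock, userspace has to turn a relative timeout into an absolute one first. A hedged userspace sketch with a hypothetical helper name:

#include <stdint.h>
#include <time.h>

/* Hypothetical helper: convert a relative timeout in nanoseconds into the
 * absolute CLOCK_MONOTONIC value expected in drm_syncobj_wait::timeout_nsec. */
static int64_t example_absolute_timeout(int64_t relative_ns)
{
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        return (int64_t)now.tv_sec * 1000000000LL + now.tv_nsec + relative_ns;
}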
1149 :
1150 0 : static int drm_syncobj_array_wait(struct drm_device *dev,
1151 : struct drm_file *file_private,
1152 : struct drm_syncobj_wait *wait,
1153 : struct drm_syncobj_timeline_wait *timeline_wait,
1154 : struct drm_syncobj **syncobjs, bool timeline)
1155 : {
1156 0 : signed long timeout = 0;
1157 0 : uint32_t first = ~0;
1158 :
1159 0 : if (!timeline) {
1160 0 : timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
1161 0 : timeout = drm_syncobj_array_wait_timeout(syncobjs,
1162 : NULL,
1163 : wait->count_handles,
1164 : wait->flags,
1165 : timeout, &first);
1166 0 : if (timeout < 0)
1167 0 : return timeout;
1168 0 : wait->first_signaled = first;
1169 : } else {
1170 0 : timeout = drm_timeout_abs_to_jiffies(timeline_wait->timeout_nsec);
1171 0 : timeout = drm_syncobj_array_wait_timeout(syncobjs,
1172 0 : u64_to_user_ptr(timeline_wait->points),
1173 : timeline_wait->count_handles,
1174 : timeline_wait->flags,
1175 : timeout, &first);
1176 0 : if (timeout < 0)
1177 0 : return timeout;
1178 0 : timeline_wait->first_signaled = first;
1179 : }
1180 : return 0;
1181 : }
1182 :
1183 0 : static int drm_syncobj_array_find(struct drm_file *file_private,
1184 : void __user *user_handles,
1185 : uint32_t count_handles,
1186 : struct drm_syncobj ***syncobjs_out)
1187 : {
1188 : uint32_t i, *handles;
1189 : struct drm_syncobj **syncobjs;
1190 : int ret;
1191 :
1192 0 : handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
1193 0 : if (handles == NULL)
1194 : return -ENOMEM;
1195 :
1196 0 : if (copy_from_user(handles, user_handles,
1197 : sizeof(uint32_t) * count_handles)) {
1198 : ret = -EFAULT;
1199 : goto err_free_handles;
1200 : }
1201 :
1202 0 : syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
1203 0 : if (syncobjs == NULL) {
1204 : ret = -ENOMEM;
1205 : goto err_free_handles;
1206 : }
1207 :
1208 0 : for (i = 0; i < count_handles; i++) {
1209 0 : syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
1210 0 : if (!syncobjs[i]) {
1211 : ret = -ENOENT;
1212 : goto err_put_syncobjs;
1213 : }
1214 : }
1215 :
1216 0 : kfree(handles);
1217 0 : *syncobjs_out = syncobjs;
1218 0 : return 0;
1219 :
1220 : err_put_syncobjs:
1221 0 : while (i-- > 0)
1222 0 : drm_syncobj_put(syncobjs[i]);
1223 0 : kfree(syncobjs);
1224 : err_free_handles:
1225 0 : kfree(handles);
1226 :
1227 0 : return ret;
1228 : }
1229 :
1230 0 : static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
1231 : uint32_t count)
1232 : {
1233 : uint32_t i;
1234 :
1235 0 : for (i = 0; i < count; i++)
1236 0 : drm_syncobj_put(syncobjs[i]);
1237 0 : kfree(syncobjs);
1238 0 : }
1239 :
1240 : int
1241 0 : drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
1242 : struct drm_file *file_private)
1243 : {
1244 0 : struct drm_syncobj_wait *args = data;
1245 : struct drm_syncobj **syncobjs;
1246 0 : int ret = 0;
1247 :
1248 0 : if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
1249 : return -EOPNOTSUPP;
1250 :
1251 0 : if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
1252 : DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
1253 : return -EINVAL;
1254 :
1255 0 : if (args->count_handles == 0)
1256 : return -EINVAL;
1257 :
1258 0 : ret = drm_syncobj_array_find(file_private,
1259 0 : u64_to_user_ptr(args->handles),
1260 : args->count_handles,
1261 : &syncobjs);
1262 0 : if (ret < 0)
1263 : return ret;
1264 :
1265 0 : ret = drm_syncobj_array_wait(dev, file_private,
1266 : args, NULL, syncobjs, false);
1267 :
1268 0 : drm_syncobj_array_free(syncobjs, args->count_handles);
1269 :
1270 0 : return ret;
1271 : }
1272 :
1273 : int
1274 0 : drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
1275 : struct drm_file *file_private)
1276 : {
1277 0 : struct drm_syncobj_timeline_wait *args = data;
1278 : struct drm_syncobj **syncobjs;
1279 0 : int ret = 0;
1280 :
1281 0 : if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
1282 : return -EOPNOTSUPP;
1283 :
1284 0 : if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
1285 : DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
1286 : DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
1287 : return -EINVAL;
1288 :
1289 0 : if (args->count_handles == 0)
1290 : return -EINVAL;
1291 :
1292 0 : ret = drm_syncobj_array_find(file_private,
1293 0 : u64_to_user_ptr(args->handles),
1294 : args->count_handles,
1295 : &syncobjs);
1296 0 : if (ret < 0)
1297 : return ret;
1298 :
1299 0 : ret = drm_syncobj_array_wait(dev, file_private,
1300 : NULL, args, syncobjs, true);
1301 :
1302 0 : drm_syncobj_array_free(syncobjs, args->count_handles);
1303 :
1304 0 : return ret;
1305 : }
1306 :
1307 :
1308 : int
1309 0 : drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
1310 : struct drm_file *file_private)
1311 : {
1312 0 : struct drm_syncobj_array *args = data;
1313 : struct drm_syncobj **syncobjs;
1314 : uint32_t i;
1315 : int ret;
1316 :
1317 0 : if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
1318 : return -EOPNOTSUPP;
1319 :
1320 0 : if (args->pad != 0)
1321 : return -EINVAL;
1322 :
1323 0 : if (args->count_handles == 0)
1324 : return -EINVAL;
1325 :
1326 0 : ret = drm_syncobj_array_find(file_private,
1327 0 : u64_to_user_ptr(args->handles),
1328 : args->count_handles,
1329 : &syncobjs);
1330 0 : if (ret < 0)
1331 : return ret;
1332 :
1333 0 : for (i = 0; i < args->count_handles; i++)
1334 0 : drm_syncobj_replace_fence(syncobjs[i], NULL);
1335 :
1336 0 : drm_syncobj_array_free(syncobjs, args->count_handles);
1337 :
1338 0 : return 0;
1339 : }
1340 :
1341 : int
1342 0 : drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
1343 : struct drm_file *file_private)
1344 : {
1345 0 : struct drm_syncobj_array *args = data;
1346 : struct drm_syncobj **syncobjs;
1347 : uint32_t i;
1348 : int ret;
1349 :
1350 0 : if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
1351 : return -EOPNOTSUPP;
1352 :
1353 0 : if (args->pad != 0)
1354 : return -EINVAL;
1355 :
1356 0 : if (args->count_handles == 0)
1357 : return -EINVAL;
1358 :
1359 0 : ret = drm_syncobj_array_find(file_private,
1360 0 : u64_to_user_ptr(args->handles),
1361 : args->count_handles,
1362 : &syncobjs);
1363 0 : if (ret < 0)
1364 : return ret;
1365 :
1366 0 : for (i = 0; i < args->count_handles; i++) {
1367 0 : ret = drm_syncobj_assign_null_handle(syncobjs[i]);
1368 0 : if (ret < 0)
1369 : break;
1370 : }
1371 :
1372 0 : drm_syncobj_array_free(syncobjs, args->count_handles);
1373 :
1374 0 : return ret;
1375 : }
1376 :
1377 : int
1378 0 : drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
1379 : struct drm_file *file_private)
1380 : {
1381 0 : struct drm_syncobj_timeline_array *args = data;
1382 : struct drm_syncobj **syncobjs;
1383 : struct dma_fence_chain **chains;
1384 : uint64_t *points;
1385 : uint32_t i, j;
1386 : int ret;
1387 :
1388 0 : if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
1389 : return -EOPNOTSUPP;
1390 :
1391 0 : if (args->flags != 0)
1392 : return -EINVAL;
1393 :
1394 0 : if (args->count_handles == 0)
1395 : return -EINVAL;
1396 :
1397 0 : ret = drm_syncobj_array_find(file_private,
1398 0 : u64_to_user_ptr(args->handles),
1399 : args->count_handles,
1400 : &syncobjs);
1401 0 : if (ret < 0)
1402 : return ret;
1403 :
1404 0 : points = kmalloc_array(args->count_handles, sizeof(*points),
1405 : GFP_KERNEL);
1406 0 : if (!points) {
1407 : ret = -ENOMEM;
1408 : goto out;
1409 : }
1410 0 : if (!u64_to_user_ptr(args->points)) {
1411 0 : memset(points, 0, args->count_handles * sizeof(uint64_t));
1412 0 : } else if (copy_from_user(points, u64_to_user_ptr(args->points),
1413 0 : sizeof(uint64_t) * args->count_handles)) {
1414 : ret = -EFAULT;
1415 : goto err_points;
1416 : }
1417 :
1418 0 : chains = kmalloc_array(args->count_handles, sizeof(void *), GFP_KERNEL);
1419 0 : if (!chains) {
1420 : ret = -ENOMEM;
1421 : goto err_points;
1422 : }
1423 0 : for (i = 0; i < args->count_handles; i++) {
1424 0 : chains[i] = dma_fence_chain_alloc();
1425 0 : if (!chains[i]) {
1426 0 : for (j = 0; j < i; j++)
1427 0 : dma_fence_chain_free(chains[j]);
1428 : ret = -ENOMEM;
1429 : goto err_chains;
1430 : }
1431 : }
1432 :
1433 0 : for (i = 0; i < args->count_handles; i++) {
1434 0 : struct dma_fence *fence = dma_fence_get_stub();
1435 :
1436 0 : drm_syncobj_add_point(syncobjs[i], chains[i],
1437 0 : fence, points[i]);
1438 0 : dma_fence_put(fence);
1439 : }
1440 : err_chains:
1441 0 : kfree(chains);
1442 : err_points:
1443 0 : kfree(points);
1444 : out:
1445 0 : drm_syncobj_array_free(syncobjs, args->count_handles);
1446 :
1447 0 : return ret;
1448 : }
1449 :
1450 0 : int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
1451 : struct drm_file *file_private)
1452 : {
1453 0 : struct drm_syncobj_timeline_array *args = data;
1454 : struct drm_syncobj **syncobjs;
1455 0 : uint64_t __user *points = u64_to_user_ptr(args->points);
1456 : uint32_t i;
1457 : int ret;
1458 :
1459 0 : if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
1460 : return -EOPNOTSUPP;
1461 :
1462 0 : if (args->flags & ~DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED)
1463 : return -EINVAL;
1464 :
1465 0 : if (args->count_handles == 0)
1466 : return -EINVAL;
1467 :
1468 0 : ret = drm_syncobj_array_find(file_private,
1469 0 : u64_to_user_ptr(args->handles),
1470 : args->count_handles,
1471 : &syncobjs);
1472 0 : if (ret < 0)
1473 : return ret;
1474 :
1475 0 : for (i = 0; i < args->count_handles; i++) {
1476 : struct dma_fence_chain *chain;
1477 : struct dma_fence *fence;
1478 : uint64_t point;
1479 :
1480 0 : fence = drm_syncobj_fence_get(syncobjs[i]);
1481 0 : chain = to_dma_fence_chain(fence);
1482 0 : if (chain) {
1483 0 : struct dma_fence *iter, *last_signaled =
1484 : dma_fence_get(fence);
1485 :
1486 0 : if (args->flags &
1487 : DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED) {
1488 0 : point = fence->seqno;
1489 : } else {
1490 0 : dma_fence_chain_for_each(iter, fence) {
1491 0 : if (iter->context != fence->context) {
1492 : dma_fence_put(iter);
1493 : /* It is most likely that the timeline has
1494 : * unordered points. */
1495 : break;
1496 : }
1497 0 : dma_fence_put(last_signaled);
1498 0 : last_signaled = dma_fence_get(iter);
1499 : }
1500 0 : point = dma_fence_is_signaled(last_signaled) ?
1501 0 : last_signaled->seqno :
1502 0 : to_dma_fence_chain(last_signaled)->prev_seqno;
1503 : }
1504 : dma_fence_put(last_signaled);
1505 : } else {
1506 0 : point = 0;
1507 : }
1508 0 : dma_fence_put(fence);
1509 0 : ret = copy_to_user(&points[i], &point, sizeof(uint64_t));
1510 0 : ret = ret ? -EFAULT : 0;
1511 0 : if (ret)
1512 : break;
1513 : }
1514 0 : drm_syncobj_array_free(syncobjs, args->count_handles);
1515 :
1516 0 : return ret;
1517 : }
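Finally, a hedged userspace sketch tying the two timeline ioctls above together: signal a point from the host, then read back the last signaled payload. It assumes the same includes as the earlier userspace sketches; the example_ function is hypothetical.

/* Hypothetical sketch: host-signal timeline point 5, then query the payload. */
static int example_timeline_signal_and_query(int drm_fd, uint32_t handle)
{
        uint64_t point = 5, payload = 0;
        struct drm_syncobj_timeline_array args = {
                .handles = (uintptr_t)&handle,
                .points = (uintptr_t)&point,
                .count_handles = 1,
        };

        if (ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, &args))
                return -1;

        args.points = (uintptr_t)&payload;
        if (ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_QUERY, &args))
                return -1;

        /* payload now holds the last signaled point (5 in this example). */
        return 0;
}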
|