/*
 * Header file for reservations for dma-buf and ttm
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Copyright (C) 2012-2013 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef _LINUX_RESERVATION_H
#define _LINUX_RESERVATION_H

#include <linux/ww_mutex.h>
#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/rcupdate.h>

extern struct ww_class reservation_ww_class;

struct dma_resv_list;

/**
 * enum dma_resv_usage - how the fences from a dma_resv obj are used
 *
 * This enum describes the different use cases for a dma_resv object and
 * controls which fences are returned when queried.
 *
 * An important fact is that the usages are ordered KERNEL < WRITE < READ <
 * BOOKKEEP; when the dma_resv object is asked for the fences of one use case,
 * the fences of the lower use cases are returned as well.
 *
 * For example, when asking for WRITE fences, the KERNEL fences are returned
 * as well. Similarly, when asking for READ fences, both the WRITE and KERNEL
 * fences are returned.
 *
 * Already used fences can be promoted in the sense that a fence with
 * DMA_RESV_USAGE_BOOKKEEP could become DMA_RESV_USAGE_READ by adding it again
 * with this usage. But fences can never be degraded in the sense that a fence
 * with DMA_RESV_USAGE_WRITE could become DMA_RESV_USAGE_READ.
 */
enum dma_resv_usage {
        /**
         * @DMA_RESV_USAGE_KERNEL: For in-kernel memory management only.
         *
         * This should only be used for things like copying or clearing memory
         * with a DMA hardware engine for the purpose of kernel memory
         * management.
         *
         * Drivers must *always* wait for those fences before accessing the
         * resource protected by the dma_resv object. The only exception is
         * when the resource is known to be locked down in place by pinning it
         * previously.
         */
        DMA_RESV_USAGE_KERNEL,

        /**
         * @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
         *
         * This should only be used for userspace command submissions which add
         * an implicit write dependency.
         */
        DMA_RESV_USAGE_WRITE,

        /**
         * @DMA_RESV_USAGE_READ: Implicit read synchronization.
         *
         * This should only be used for userspace command submissions which add
         * an implicit read dependency.
         */
        DMA_RESV_USAGE_READ,

        /**
         * @DMA_RESV_USAGE_BOOKKEEP: No implicit sync.
         *
         * This should be used by submissions which don't want to participate
         * in any implicit synchronization.
         *
         * The most common cases are preemption fences, page table updates,
         * TLB flushes as well as explicitly synced user submissions.
         *
         * Explicitly synced user submissions can be promoted to
         * DMA_RESV_USAGE_READ or DMA_RESV_USAGE_WRITE as needed using
         * dma_buf_import_sync_file() when implicit synchronization should
         * become necessary after the fence was initially added.
         */
        DMA_RESV_USAGE_BOOKKEEP
};
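
/*
 * Example (illustrative sketch, not part of the original header): a fence
 * added with a low usage can later be promoted by adding it again with a
 * higher usage, here from BOOKKEEP to READ; the reverse never happens:
 *
 *      dma_resv_add_fence(obj, fence, DMA_RESV_USAGE_BOOKKEEP);
 *      ...
 *      dma_resv_add_fence(obj, fence, DMA_RESV_USAGE_READ);
 */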

/**
 * dma_resv_usage_rw - helper for implicit sync
 * @write: true if we create a new implicit sync write
 *
 * This returns the implicit synchronization usage for write or read accesses,
 * see enum dma_resv_usage and &dma_buf.resv.
 */
static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
{
        /* This looks confusing at first sight, but is indeed correct.
         *
         * The rationale is that new write operations need to wait for the
         * existing read and write operations to finish.
         * But a new read operation only needs to wait for the existing write
         * operations to finish.
         */
        return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
}
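
/*
 * Example (hedged sketch): a caller about to access the buffer picks the
 * usage to wait for with this helper; "is_write" is a hypothetical flag of
 * the caller and dma_resv_wait_timeout() is declared further below:
 *
 *      long ret;
 *
 *      ret = dma_resv_wait_timeout(obj, dma_resv_usage_rw(is_write),
 *                                  true, MAX_SCHEDULE_TIMEOUT);
 *      if (ret < 0)
 *              return ret;
 */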

/**
 * struct dma_resv - a reservation object manages fences for a buffer
 *
 * This is a container for dma_fence objects which needs to handle multiple use
 * cases.
 *
 * One use is to synchronize cross-driver access to a struct dma_buf, either for
 * dynamic buffer management or just to handle implicit synchronization between
 * different users of the buffer in userspace. See &dma_buf.resv for a more
 * in-depth discussion.
 *
 * The other major use is to manage access and locking within a driver in a
 * buffer-based memory manager. struct ttm_buffer_object is the canonical
 * example here, since this is where reservation objects originated from. But
 * use in drivers is spreading and some drivers also manage struct
 * drm_gem_object with the same scheme.
 */
struct dma_resv {
        /**
         * @lock:
         *
         * Update side lock. Don't use directly, instead use the wrapper
         * functions like dma_resv_lock() and dma_resv_unlock().
         *
         * Drivers which use the reservation object to manage memory
         * dynamically also use this lock to protect buffer object state like
         * placement, allocation policies or throughout command submission.
         */
        struct ww_mutex lock;

        /**
         * @fences:
         *
         * Array of fences which were added to the dma_resv object.
         *
         * A new fence is added by calling dma_resv_add_fence(). Since this
         * often needs to be done past the point of no return in command
         * submission it cannot fail, and therefore sufficient slots need to be
         * reserved by calling dma_resv_reserve_fences().
         */
        struct dma_resv_list __rcu *fences;
};
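
/*
 * Example (hedged sketch): since dma_resv_add_fence() cannot fail, the slot
 * has to be reserved earlier, while errors can still be handled:
 *
 *      int r;
 *
 *      dma_resv_lock(obj, NULL);
 *      r = dma_resv_reserve_fences(obj, 1);
 *      if (r) {
 *              dma_resv_unlock(obj);
 *              return r;
 *      }
 *      ... submit work and obtain a struct dma_fence *fence ...
 *      dma_resv_add_fence(obj, fence, DMA_RESV_USAGE_WRITE);
 *      dma_resv_unlock(obj);
 */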

/**
 * struct dma_resv_iter - current position into the dma_resv fences
 *
 * Don't touch this directly in the driver, use the accessor functions instead.
 *
 * IMPORTANT
 *
 * When using the lockless iterators like dma_resv_iter_next_unlocked() or
 * dma_resv_for_each_fence_unlocked(), beware that the iterator can be
 * restarted. Code which accumulates statistics or similar needs to check for
 * this with dma_resv_iter_is_restarted().
 */
struct dma_resv_iter {
        /** @obj: The dma_resv object we iterate over */
        struct dma_resv *obj;

        /** @usage: Return fences with this usage or lower. */
        enum dma_resv_usage usage;

        /** @fence: the currently handled fence */
        struct dma_fence *fence;

        /** @fence_usage: the usage of the current fence */
        enum dma_resv_usage fence_usage;

        /** @index: index into the shared fences */
        unsigned int index;

        /** @fences: the shared fences; private, *MUST* not be dereferenced */
        struct dma_resv_list *fences;

        /** @num_fences: number of fences */
        unsigned int num_fences;

        /** @is_restarted: true if this is the first returned fence */
        bool is_restarted;
};

struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor);
struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor);
struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor);
struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor);

/**
 * dma_resv_iter_begin - initialize a dma_resv_iter object
 * @cursor: The dma_resv_iter object to initialize
 * @obj: The dma_resv object which we want to iterate over
 * @usage: controls which fences to include, see enum dma_resv_usage.
 */
static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
                                       struct dma_resv *obj,
                                       enum dma_resv_usage usage)
{
        cursor->obj = obj;
        cursor->usage = usage;
        cursor->fence = NULL;
}

/**
 * dma_resv_iter_end - cleanup a dma_resv_iter object
 * @cursor: the dma_resv_iter object which should be cleaned up
 *
 * Make sure that the reference to the fence in the cursor is properly
 * dropped.
 */
static inline void dma_resv_iter_end(struct dma_resv_iter *cursor)
{
        dma_fence_put(cursor->fence);
}

/**
 * dma_resv_iter_usage - Return the usage of the current fence
 * @cursor: the cursor of the current position
 *
 * Returns the usage of the currently processed fence.
 */
static inline enum dma_resv_usage
dma_resv_iter_usage(struct dma_resv_iter *cursor)
{
        return cursor->fence_usage;
}

/**
 * dma_resv_iter_is_restarted - test if this is the first fence after a restart
 * @cursor: the cursor with the current position
 *
 * Return true if this is the first fence in an iteration after a restart.
 */
static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
{
        return cursor->is_restarted;
}

/**
 * dma_resv_for_each_fence_unlocked - unlocked fence iterator
 * @cursor: a struct dma_resv_iter pointer
 * @fence: the current fence
 *
 * Iterate over the fences in a struct dma_resv object without holding the
 * &dma_resv.lock, using RCU instead. The cursor needs to be initialized with
 * dma_resv_iter_begin() and cleaned up with dma_resv_iter_end(). Inside the
 * iterator a reference to the dma_fence is held and the RCU lock dropped.
 *
 * Beware that the iterator can be restarted when the struct dma_resv for
 * @cursor is modified. Code which accumulates statistics or similar needs to
 * check for this with dma_resv_iter_is_restarted(). For this reason prefer the
 * locked iterator dma_resv_for_each_fence() whenever possible.
 */
#define dma_resv_for_each_fence_unlocked(cursor, fence)                 \
        for (fence = dma_resv_iter_first_unlocked(cursor);              \
             fence; fence = dma_resv_iter_next_unlocked(cursor))
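
/*
 * Example (illustrative sketch): counting fences without holding the lock;
 * accumulated state must be thrown away whenever the iterator restarts:
 *
 *      struct dma_resv_iter cursor;
 *      struct dma_fence *fence;
 *      unsigned int count = 0;
 *
 *      dma_resv_iter_begin(&cursor, obj, DMA_RESV_USAGE_READ);
 *      dma_resv_for_each_fence_unlocked(&cursor, fence) {
 *              if (dma_resv_iter_is_restarted(&cursor))
 *                      count = 0;
 *              ++count;
 *      }
 *      dma_resv_iter_end(&cursor);
 */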

/**
 * dma_resv_for_each_fence - fence iterator
 * @cursor: a struct dma_resv_iter pointer
 * @obj: a dma_resv object pointer
 * @usage: controls which fences to return
 * @fence: the current fence
 *
 * Iterate over the fences in a struct dma_resv object while holding the
 * &dma_resv.lock. @usage controls which fences are returned, see enum
 * dma_resv_usage. The cursor initialisation is part of the iterator and the
 * fence stays valid as long as the lock is held, so no extra reference to the
 * fence is taken.
 */
#define dma_resv_for_each_fence(cursor, obj, usage, fence)      \
        for (dma_resv_iter_begin(cursor, obj, usage),   \
             fence = dma_resv_iter_first(cursor); fence;        \
             fence = dma_resv_iter_next(cursor))
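
/*
 * Example (illustrative sketch): with &dma_resv.lock held no restart
 * handling, extra reference or explicit cleanup is needed:
 *
 *      struct dma_resv_iter cursor;
 *      struct dma_fence *fence;
 *
 *      dma_resv_assert_held(obj);
 *      dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_BOOKKEEP, fence)
 *              pr_info("fence context %llu\n", fence->context);
 */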

#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)

#ifdef CONFIG_DEBUG_MUTEXES
void dma_resv_reset_max_fences(struct dma_resv *obj);
#else
static inline void dma_resv_reset_max_fences(struct dma_resv *obj) {}
#endif

/**
 * dma_resv_lock - lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Locks the reservation object for exclusive access and modification. Note
 * that the lock is only against other writers; readers will run concurrently
 * with a writer under RCU. The seqlock is used to notify readers if they
 * overlap with a writer.
 *
 * As the reservation object may be locked by multiple parties in an
 * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
 * object may be locked by itself by passing NULL as @ctx.
 *
 * When a die situation is indicated by returning -EDEADLK all locks held by
 * @ctx must be unlocked and then dma_resv_lock_slow() called on @obj.
 *
 * Unlocked by calling dma_resv_unlock().
 *
 * See also dma_resv_lock_interruptible() for the interruptible variant.
 */
static inline int dma_resv_lock(struct dma_resv *obj,
                                struct ww_acquire_ctx *ctx)
{
        return ww_mutex_lock(&obj->lock, ctx);
}
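
/*
 * Example (hedged sketch, hypothetical objects a and b): the wound/wait
 * dance for taking two reservation objects in undefined order; a full
 * implementation retries in a loop for as long as -EDEADLK is returned:
 *
 *      struct ww_acquire_ctx ctx;
 *      int ret;
 *
 *      ww_acquire_init(&ctx, &reservation_ww_class);
 *      ret = dma_resv_lock(a, &ctx);
 *      if (!ret) {
 *              ret = dma_resv_lock(b, &ctx);
 *              if (ret == -EDEADLK) {
 *                      dma_resv_unlock(a);
 *                      dma_resv_lock_slow(b, &ctx);
 *                      ret = dma_resv_lock(a, &ctx);
 *              }
 *      }
 *      ww_acquire_done(&ctx);
 *      ...
 *      dma_resv_unlock(a);
 *      dma_resv_unlock(b);
 *      ww_acquire_fini(&ctx);
 */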

/**
 * dma_resv_lock_interruptible - lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Locks the reservation object interruptibly for exclusive access and
 * modification. Note that the lock is only against other writers; readers
 * will run concurrently with a writer under RCU. The seqlock is used to
 * notify readers if they overlap with a writer.
 *
 * As the reservation object may be locked by multiple parties in an
 * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
 * object may be locked by itself by passing NULL as @ctx.
 *
 * When a die situation is indicated by returning -EDEADLK all locks held by
 * @ctx must be unlocked and then dma_resv_lock_slow_interruptible() called on
 * @obj.
 *
 * Unlocked by calling dma_resv_unlock().
 */
static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
                                              struct ww_acquire_ctx *ctx)
{
        return ww_mutex_lock_interruptible(&obj->lock, ctx);
}

/**
 * dma_resv_lock_slow - slowpath lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Acquires the reservation object after a die case. This function will sleep
 * until the lock becomes available. See dma_resv_lock() as well.
 *
 * See also dma_resv_lock_slow_interruptible() for the interruptible variant.
 */
static inline void dma_resv_lock_slow(struct dma_resv *obj,
                                      struct ww_acquire_ctx *ctx)
{
        ww_mutex_lock_slow(&obj->lock, ctx);
}

/**
 * dma_resv_lock_slow_interruptible - slowpath lock the reservation
 * object, interruptible
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Acquires the reservation object interruptibly after a die case. This
 * function will sleep until the lock becomes available. See
 * dma_resv_lock_interruptible() as well.
 */
static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
                                                   struct ww_acquire_ctx *ctx)
{
        return ww_mutex_lock_slow_interruptible(&obj->lock, ctx);
}

/**
 * dma_resv_trylock - trylock the reservation object
 * @obj: the reservation object
 *
 * Tries to lock the reservation object for exclusive access and modification.
 * Note that the lock is only against other writers; readers will run
 * concurrently with a writer under RCU. The seqlock is used to notify readers
 * if they overlap with a writer.
 *
 * Also note that since no context is provided, no deadlock protection is
 * possible, which is also not needed for a trylock.
 *
 * Returns true if the lock was acquired, false otherwise.
 */
static inline bool __must_check dma_resv_trylock(struct dma_resv *obj)
{
        return ww_mutex_trylock(&obj->lock, NULL);
}
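
/*
 * Example (illustrative sketch): opportunistic processing, e.g. from a
 * shrinker, which skips contended objects instead of blocking on them:
 *
 *      if (!dma_resv_trylock(obj))
 *              return false;
 *      ...
 *      dma_resv_unlock(obj);
 */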

/**
 * dma_resv_is_locked - is the reservation object locked
 * @obj: the reservation object
 *
 * Returns true if the mutex is locked, false if unlocked.
 */
static inline bool dma_resv_is_locked(struct dma_resv *obj)
{
        return ww_mutex_is_locked(&obj->lock);
}

/**
 * dma_resv_locking_ctx - returns the context used to lock the object
 * @obj: the reservation object
 *
 * Returns the context used to lock a reservation object or NULL if no context
 * was used or the object is not locked at all.
 *
 * WARNING: This interface is pretty horrible, but TTM needs it because it
 * doesn't pass the struct ww_acquire_ctx around in some very long callchains.
 * Everyone else just uses it to check whether they're holding a reservation or
 * not.
 */
static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
{
        return READ_ONCE(obj->lock.ctx);
}

/**
 * dma_resv_unlock - unlock the reservation object
 * @obj: the reservation object
 *
 * Unlocks the reservation object following exclusive access.
 */
static inline void dma_resv_unlock(struct dma_resv *obj)
{
        dma_resv_reset_max_fences(obj);
        ww_mutex_unlock(&obj->lock);
}

void dma_resv_init(struct dma_resv *obj);
void dma_resv_fini(struct dma_resv *obj);
int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences);
void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
                        enum dma_resv_usage usage);
void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
                             struct dma_fence *fence,
                             enum dma_resv_usage usage);
int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
                        unsigned int *num_fences, struct dma_fence ***fences);
int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
                           struct dma_fence **fence);
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
                           bool intr, unsigned long timeout);
bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage);
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq);
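
/*
 * Example (hedged sketch): snapshotting the current fences with
 * dma_resv_get_fences(); this assumes the caller owns the references in the
 * returned array and has to drop them and free the array again:
 *
 *      struct dma_fence **fences;
 *      unsigned int i, num;
 *      int r;
 *
 *      r = dma_resv_get_fences(obj, DMA_RESV_USAGE_READ, &num, &fences);
 *      if (r)
 *              return r;
 *      for (i = 0; i < num; ++i)
 *              dma_fence_put(fences[i]);
 *      kfree(fences);
 */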

#endif /* _LINUX_RESERVATION_H */
