// SPDX-License-Identifier: GPL-2.0-only
/*
 * dma-fence-array: aggregate fences to be waited together
 *
 * Copyright (C) 2016 Collabora Ltd
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 * Authors:
 *	Gustavo Padovan <gustavo@padovan.org>
 *	Christian König <christian.koenig@amd.com>
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dma-fence-array.h>

#define PENDING_ERROR 1

static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
{
        return "dma_fence_array";
}

static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
{
        return "unbound";
}

static void dma_fence_array_set_pending_error(struct dma_fence_array *array,
                                               int error)
{
        /*
         * Propagate the first error reported by any of our fences, but only
         * before we ourselves are signaled.
         */
        if (error)
                cmpxchg(&array->base.error, PENDING_ERROR, error);
}

static void dma_fence_array_clear_pending_error(struct dma_fence_array *array)
{
        /* Clear the error flag if not actually set. */
        cmpxchg(&array->base.error, PENDING_ERROR, 0);
}
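
/*
 * Illustration (not part of the original file): the base.error life cycle
 * under the PENDING_ERROR scheme above, assuming @array was just created.
 * The sentinel set at creation time is replaced by the first real fence
 * error, later errors are ignored, and the sentinel is cleared to 0 right
 * before the array signals if no error was ever recorded.
 * dma_fence_array_error_example() is a hypothetical helper name.
 */
static void __maybe_unused dma_fence_array_error_example(struct dma_fence_array *array)
{
        /* array->base.error == PENDING_ERROR after dma_fence_array_create() */

        dma_fence_array_set_pending_error(array, -EIO);    /* first error wins */
        dma_fence_array_set_pending_error(array, -EINVAL); /* later error ignored */
        dma_fence_array_clear_pending_error(array);        /* no-op, -EIO is kept */

        /* a waiter checking fence->error after signaling now sees -EIO */
}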

static void irq_dma_fence_array_work(struct irq_work *wrk)
{
        struct dma_fence_array *array = container_of(wrk, typeof(*array), work);

        dma_fence_array_clear_pending_error(array);

        dma_fence_signal(&array->base);
        dma_fence_put(&array->base);
}

static void dma_fence_array_cb_func(struct dma_fence *f,
                                    struct dma_fence_cb *cb)
{
        struct dma_fence_array_cb *array_cb =
                container_of(cb, struct dma_fence_array_cb, cb);
        struct dma_fence_array *array = array_cb->array;

        dma_fence_array_set_pending_error(array, f->error);

        if (atomic_dec_and_test(&array->num_pending))
                irq_work_queue(&array->work);
        else
                dma_fence_put(&array->base);
}

static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
{
        struct dma_fence_array *array = to_dma_fence_array(fence);
        struct dma_fence_array_cb *cb = (void *)(&array[1]);
        unsigned i;

        for (i = 0; i < array->num_fences; ++i) {
                cb[i].array = array;
                /*
                 * As we may report that the fence is signaled before all
                 * callbacks are complete, we need to take an additional
                 * reference count on the array so that we do not free it too
                 * early. The core fence handling will only hold the reference
                 * until we signal the array as complete (but that is now
                 * insufficient).
                 */
                dma_fence_get(&array->base);
                if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
                                           dma_fence_array_cb_func)) {
                        int error = array->fences[i]->error;

                        dma_fence_array_set_pending_error(array, error);
                        dma_fence_put(&array->base);
                        if (atomic_dec_and_test(&array->num_pending)) {
                                dma_fence_array_clear_pending_error(array);
                                return false;
                        }
                }
        }

        return true;
}

static bool dma_fence_array_signaled(struct dma_fence *fence)
{
        struct dma_fence_array *array = to_dma_fence_array(fence);

        if (atomic_read(&array->num_pending) > 0)
                return false;

        dma_fence_array_clear_pending_error(array);
        return true;
}

static void dma_fence_array_release(struct dma_fence *fence)
{
        struct dma_fence_array *array = to_dma_fence_array(fence);
        unsigned i;

        for (i = 0; i < array->num_fences; ++i)
                dma_fence_put(array->fences[i]);

        kfree(array->fences);
        dma_fence_free(fence);
}

static void dma_fence_array_set_deadline(struct dma_fence *fence,
                                         ktime_t deadline)
{
        struct dma_fence_array *array = to_dma_fence_array(fence);
        unsigned i;

        for (i = 0; i < array->num_fences; ++i)
                dma_fence_set_deadline(array->fences[i], deadline);
}

const struct dma_fence_ops dma_fence_array_ops = {
        .get_driver_name = dma_fence_array_get_driver_name,
        .get_timeline_name = dma_fence_array_get_timeline_name,
        .enable_signaling = dma_fence_array_enable_signaling,
        .signaled = dma_fence_array_signaled,
        .release = dma_fence_array_release,
        .set_deadline = dma_fence_array_set_deadline,
};
EXPORT_SYMBOL(dma_fence_array_ops);

/**
 * dma_fence_array_create - Create a custom fence array
 * @num_fences:		[in]	number of fences to add in the array
 * @fences:		[in]	array containing the fences
 * @context:		[in]	fence context to use
 * @seqno:		[in]	sequence number to use
 * @signal_on_any:	[in]	signal on any fence in the array
 *
 * Allocate a dma_fence_array object and initialize the base fence with
 * dma_fence_init().
 * In case of error it returns NULL.
 *
 * The caller should allocate the fences array with num_fences size
 * and fill it with the fences it wants to add to the object. Ownership of this
 * array is taken and dma_fence_put() is used on each fence on release.
 *
 * If @signal_on_any is true the fence array signals if any fence in the array
 * signals, otherwise it signals when all fences in the array signal.
 */
struct dma_fence_array *dma_fence_array_create(int num_fences,
                                               struct dma_fence **fences,
                                               u64 context, unsigned seqno,
                                               bool signal_on_any)
{
        struct dma_fence_array *array;
        size_t size = sizeof(*array);

        WARN_ON(!num_fences || !fences);

        /* Allocate the callback structures behind the array. */
        size += num_fences * sizeof(struct dma_fence_array_cb);
        array = kzalloc(size, GFP_KERNEL);
        if (!array)
                return NULL;

        spin_lock_init(&array->lock);
        dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
                       context, seqno);
        init_irq_work(&array->work, irq_dma_fence_array_work);

        array->num_fences = num_fences;
        atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
        array->fences = fences;

        array->base.error = PENDING_ERROR;

        /*
         * dma_fence_array objects should never contain any other fence
         * containers or otherwise we run into recursion and potential kernel
         * stack overflow on operations on the dma_fence_array.
         *
         * The correct way of handling this is to flatten out the array by the
         * caller instead.
         *
         * Enforce this here by checking that we don't create a dma_fence_array
         * with any container inside.
         */
        while (num_fences--)
                WARN_ON(dma_fence_is_container(fences[num_fences]));

        return array;
}
EXPORT_SYMBOL(dma_fence_array_create);
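
/*
 * Usage sketch (not part of the original file): combining two fences into a
 * single fence that signals once both have signaled. Both the fences[]
 * allocation and the two references taken here are handed over to the array
 * and released from dma_fence_array_release(); on failure the caller cleans
 * them up itself. Assumes @a and @b are not themselves fence containers.
 * example_merge_two() is a hypothetical helper name.
 */
static struct dma_fence *example_merge_two(struct dma_fence *a,
                                           struct dma_fence *b)
{
        struct dma_fence_array *array;
        struct dma_fence **fences;

        fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
        if (!fences)
                return NULL;

        fences[0] = dma_fence_get(a);
        fences[1] = dma_fence_get(b);

        /* signal_on_any == false: signal only when all fences have signaled */
        array = dma_fence_array_create(2, fences,
                                       dma_fence_context_alloc(1), 1, false);
        if (!array) {
                dma_fence_put(a);
                dma_fence_put(b);
                kfree(fences);
                return NULL;
        }

        return &array->base;
}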

/**
 * dma_fence_match_context - Check if all fences are from the given context
 * @fence:		[in]	fence or fence array
 * @context:		[in]	fence context to check all fences against
 *
 * Checks the provided fence or, for a fence array, all fences in the array
 * against the given context. Returns false if any fence is from a different
 * context.
 */
bool dma_fence_match_context(struct dma_fence *fence, u64 context)
{
        struct dma_fence_array *array = to_dma_fence_array(fence);
        unsigned i;

        if (!dma_fence_is_array(fence))
                return fence->context == context;

        for (i = 0; i < array->num_fences; i++) {
                if (array->fences[i]->context != context)
                        return false;
        }

        return true;
}
EXPORT_SYMBOL(dma_fence_match_context);

struct dma_fence *dma_fence_array_first(struct dma_fence *head)
{
        struct dma_fence_array *array;

        if (!head)
                return NULL;

        array = to_dma_fence_array(head);
        if (!array)
                return head;

        if (!array->num_fences)
                return NULL;

        return array->fences[0];
}
EXPORT_SYMBOL(dma_fence_array_first);

struct dma_fence *dma_fence_array_next(struct dma_fence *head,
                                       unsigned int index)
{
        struct dma_fence_array *array = to_dma_fence_array(head);

        if (!array || index >= array->num_fences)
                return NULL;

        return array->fences[index];
}
EXPORT_SYMBOL(dma_fence_array_next);
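
/*
 * Usage sketch (not part of the original file): walking the fences behind a
 * possibly-aggregated fence with dma_fence_array_first()/_next(). For a
 * plain, non-array fence the loop visits the fence itself exactly once;
 * the dma_fence_array_for_each() helper in <linux/dma-fence-array.h> wraps
 * the same pattern. example_count_signaled() is a hypothetical helper name.
 */
static unsigned int example_count_signaled(struct dma_fence *head)
{
        struct dma_fence *fence;
        unsigned int index, signaled = 0;

        for (index = 0, fence = dma_fence_array_first(head); fence;
             ++index, fence = dma_fence_array_next(head, index))
                if (dma_fence_is_signaled(fence))
                        signaled++;

        return signaled;
}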