/* $NetBSD: nouveau_fence.c,v 1.4 2016/04/13 07:57:15 riastradh Exp $ */

/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_fence.c,v 1.4 2016/04/13 07:57:15 riastradh Exp $");

#include <drm/drmP.h>

#include <asm/param.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include <engine/fifo.h>

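/*
 * A deferred callback queued on a fence: "base" is the workqueue item
 * that runs the callback, "head" links the entry on fence->work until
 * the fence is signalled.
 */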
struct fence_work {
	struct work_struct base;
	struct list_head head;
	void (*func)(void *);
	void *data;
};

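/*
 * Signal a fence: schedule any work queued on it, detach it from its
 * channel, and remove it from the pending list.  Called with the fence
 * context lock held.
 */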
static void
nouveau_fence_signal(struct nouveau_fence *fence)
{
	struct fence_work *work, *temp;

	list_for_each_entry_safe(work, temp, &fence->work, head) {
		schedule_work(&work->base);
		list_del(&work->head);
	}

	fence->channel = NULL;
	list_del(&fence->head);
}

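/*
 * Tear down a channel's fence context: force-signal everything still
 * pending, then destroy the lock.
 */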
void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence *fence, *fnext;
	spin_lock(&fctx->lock);
	list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
		nouveau_fence_signal(fence);
	}
	spin_unlock(&fctx->lock);
	spin_lock_destroy(&fctx->lock);
}

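/*
 * Initialize a channel's fence context: empty flip and pending lists
 * plus the lock that protects them.
 */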
void
nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
{
	INIT_LIST_HEAD(&fctx->flip);
	INIT_LIST_HEAD(&fctx->pending);
	spin_lock_init(&fctx->lock);
}

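/*
 * Workqueue trampoline: run the deferred callback and free its
 * fence_work container.
 */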
static void
nouveau_fence_work_handler(struct work_struct *kwork)
{
	struct fence_work *work = container_of(kwork, typeof(*work), base);
	work->func(work->data);
	kfree(work);
}

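/*
 * Arrange for func(data) to run once the fence has signalled.  If the
 * fence is already done, or the work item cannot be allocated (in
 * which case we first wait synchronously), the callback runs
 * immediately instead.
 */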
void
nouveau_fence_work(struct nouveau_fence *fence,
		   void (*func)(void *), void *data)
{
	struct nouveau_channel *chan = fence->channel;
	struct nouveau_fence_chan *fctx;
	struct fence_work *work = NULL;

	if (nouveau_fence_done(fence)) {
		func(data);
		return;
	}

	fctx = chan->fence;
	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		WARN_ON(nouveau_fence_wait(fence, false, false));
		func(data);
		return;
	}

	spin_lock(&fctx->lock);
	if (!fence->channel) {
		spin_unlock(&fctx->lock);
		kfree(work);
		func(data);
		return;
	}

	INIT_WORK(&work->base, nouveau_fence_work_handler);
	work->func = func;
	work->data = data;
	list_add(&work->head, &fence->work);
	spin_unlock(&fctx->lock);
}

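/*
 * Scan the channel's pending list and signal every fence whose
 * sequence number the channel has already reached (per the backend's
 * read hook), dropping the pending-list reference as we go.
 */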
static void
nouveau_fence_update(struct nouveau_channel *chan)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	struct nouveau_fence *fence, *fnext;

	spin_lock(&fctx->lock);
	list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
		if (fctx->read(chan) < fence->sequence)
			break;

		nouveau_fence_signal(fence);
		nouveau_fence_unref(&fence);
	}
	spin_unlock(&fctx->lock);
}

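/*
 * Emit a fence on a channel: assign it the next sequence number and a
 * 15 second timeout, write it to the ring via the backend's emit hook,
 * and, on success, add it to the pending list with an extra reference.
 */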
int
nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	int ret;

	fence->channel = chan;
	fence->timeout = jiffies + (15 * HZ);
	fence->sequence = ++fctx->sequence;

	ret = fctx->emit(fence);
	if (!ret) {
		kref_get(&fence->kref);
		spin_lock(&fctx->lock);
		list_add_tail(&fence->head, &fctx->pending);
		spin_unlock(&fctx->lock);
	}

	return ret;
}

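/*
 * Poll a fence: flush its channel's pending list first, since a fence
 * loses its channel pointer when it is signalled.
 */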
bool
nouveau_fence_done(struct nouveau_fence *fence)
{
	if (fence->channel)
		nouveau_fence_update(fence->channel);
	return !fence->channel;
}

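/*
 * FIFO uevent callback: wake anyone sleeping in
 * nouveau_fence_wait_uevent() and keep the event enabled.
 */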
static int
nouveau_fence_wait_uevent_handler(void *data, int index)
{
	struct nouveau_fence_priv *priv = data;
#ifdef __NetBSD__
	spin_lock(&priv->waitlock);
	/* XXX Set a flag... */
	DRM_SPIN_WAKEUP_ALL(&priv->waitqueue, &priv->waitlock);
	spin_unlock(&priv->waitlock);
#else
	wake_up_all(&priv->waiting);
#endif
	return NVKM_EVENT_KEEP;
}

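/*
 * Sleep until the fence signals or its timeout expires, driven by the
 * FIFO uevent interrupt rather than by polling.
 */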
static int
nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr)
{
	struct nouveau_channel *chan = fence->channel;
	struct nouveau_fifo *pfifo = nouveau_fifo(chan->drm->device);
	struct nouveau_fence_priv *priv = chan->drm->fence;
	struct nouveau_eventh *handler;
	int ret = 0;

	ret = nouveau_event_new(pfifo->uevent, 0,
				nouveau_fence_wait_uevent_handler,
				priv, &handler);
	if (ret)
		return ret;

	nouveau_event_get(handler);

	if (fence->timeout) {
		unsigned long timeout = fence->timeout - jiffies;

		if (time_before(jiffies, fence->timeout)) {
#ifdef __NetBSD__
			spin_lock(&priv->waitlock);
			if (intr) {
				DRM_SPIN_TIMED_WAIT_UNTIL(ret,
				    &priv->waitqueue, &priv->waitlock,
				    timeout,
				    nouveau_fence_done(fence));
			} else {
				DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(ret,
				    &priv->waitqueue, &priv->waitlock,
				    timeout,
				    nouveau_fence_done(fence));
			}
			spin_unlock(&priv->waitlock);
#else
			if (intr) {
				ret = wait_event_interruptible_timeout(
						priv->waiting,
						nouveau_fence_done(fence),
						timeout);
			} else {
				ret = wait_event_timeout(priv->waiting,
						nouveau_fence_done(fence),
						timeout);
			}
#endif
		}

		if (ret >= 0) {
			fence->timeout = jiffies + ret;
			if (time_after_eq(jiffies, fence->timeout))
				ret = -EBUSY;
		}
	} else {
#ifdef __NetBSD__
		spin_lock(&priv->waitlock);
		if (intr) {
			DRM_SPIN_WAIT_UNTIL(ret, &priv->waitqueue,
			    &priv->waitlock,
			    nouveau_fence_done(fence));
		} else {
			DRM_SPIN_WAIT_NOINTR_UNTIL(ret, &priv->waitqueue,
			    &priv->waitlock,
			    nouveau_fence_done(fence));
		}
		spin_unlock(&priv->waitlock);
#else
		if (intr) {
			ret = wait_event_interruptible(priv->waiting,
					nouveau_fence_done(fence));
		} else {
			wait_event(priv->waiting, nouveau_fence_done(fence));
		}
#endif
	}

	nouveau_event_ref(NULL, &handler);
	if (unlikely(ret < 0))
		return ret;

	return 0;
}

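/*
 * Wait for a fence to complete.  A lazy wait sleeps on the uevent
 * interrupt when the backend supports it; otherwise we fall back to a
 * polling loop that honours the fence timeout.
 */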
int
nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
	struct nouveau_channel *chan = fence->channel;
	struct nouveau_fence_priv *priv = chan ? chan->drm->fence : NULL;
#ifndef __NetBSD__
	unsigned long sleep_time = NSEC_PER_MSEC / 1000;
	ktime_t t;
#endif
	int ret = 0;

	while (priv && priv->uevent && lazy && !nouveau_fence_done(fence)) {
		ret = nouveau_fence_wait_uevent(fence, intr);
		if (ret < 0)
			return ret;
	}

	while (!nouveau_fence_done(fence)) {
		if (fence->timeout && time_after_eq(jiffies, fence->timeout)) {
			ret = -EBUSY;
			break;
		}

#ifdef __NetBSD__
		if (lazy)
			kpause("nvfencep", intr, 1, NULL);
		else
			DELAY(1);
#else
		__set_current_state(intr ? TASK_INTERRUPTIBLE :
					   TASK_UNINTERRUPTIBLE);
		if (lazy) {
			t = ktime_set(0, sleep_time);
			schedule_hrtimeout(&t, HRTIMER_MODE_REL);
			sleep_time *= 2;
			if (sleep_time > NSEC_PER_MSEC)
				sleep_time = NSEC_PER_MSEC;
		}

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
#endif
	}

#ifndef __NetBSD__
	__set_current_state(TASK_RUNNING);
#endif
	return ret;
}

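/*
 * Make "chan" wait for a fence emitted on another channel, using the
 * backend's channel-to-channel sync hook where possible and falling
 * back to a CPU-side wait if that fails.
 */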
int
nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	struct nouveau_channel *prev;
	int ret = 0;

	prev = fence ? fence->channel : NULL;
	if (prev) {
		if (unlikely(prev != chan && !nouveau_fence_done(fence))) {
			ret = fctx->sync(fence, prev, chan);
			if (unlikely(ret))
				ret = nouveau_fence_wait(fence, true, false);
		}
	}

	return ret;
}

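/* kref release callback: free the fence. */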
static void
nouveau_fence_del(struct kref *kref)
{
	struct nouveau_fence *fence = container_of(kref, typeof(*fence), kref);
	kfree(fence);
}

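/* Drop a fence reference and clear the caller's pointer. */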
void
nouveau_fence_unref(struct nouveau_fence **pfence)
{
	if (*pfence)
		kref_put(&(*pfence)->kref, nouveau_fence_del);
	*pfence = NULL;
}

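/* Take an additional fence reference (NULL-safe). */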
struct nouveau_fence *
nouveau_fence_ref(struct nouveau_fence *fence)
{
	if (fence)
		kref_get(&fence->kref);
	return fence;
}

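/*
 * Allocate and emit a new fence on a channel.  A minimal usage sketch
 * (illustrative only, error handling elided):
 *
 *	struct nouveau_fence *fence = NULL;
 *
 *	ret = nouveau_fence_new(chan, false, &fence);
 *	if (ret == 0) {
 *		ret = nouveau_fence_wait(fence, true, false);
 *		nouveau_fence_unref(&fence);
 *	}
 */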
int
nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
		  struct nouveau_fence **pfence)
{
	struct nouveau_fence *fence;
	int ret = 0;

	if (unlikely(!chan->fence))
		return -ENODEV;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;

	INIT_LIST_HEAD(&fence->work);
	fence->sysmem = sysmem;
	kref_init(&fence->kref);

	ret = nouveau_fence_emit(fence, chan);
	if (ret)
		nouveau_fence_unref(&fence);

	*pfence = fence;
	return ret;
}