/*	$NetBSD: nouveau_gem.c,v 1.4 2016/01/29 23:58:22 riastradh Exp $	*/

/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_gem.c,v 1.4 2016/01/29 23:58:22 riastradh Exp $");

#include <subdev/fb.h>

#include <linux/err.h>		/* XXX */

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"
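
/*
 * GEM object destructor: tear down the DRM GEM state, clear the backing
 * store pointer so nouveau_bo_del_ttm() can tell the object has been
 * through GEM teardown, then drop the underlying TTM buffer reference.
 */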
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;

#ifndef __NetBSD__		/* XXX drm prime */
	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);
#endif

	drm_gem_object_release(gem);

	/* reset filp so nouveau_bo_del_ttm() can test for it */
#ifdef __NetBSD__
	/* XXX Whattakludge! */
	gem->gemo_shm_uao = NULL;
#else
	gem->filp = NULL;
#endif
	ttm_bo_unref(&bo);
}

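/*
 * Called when a client opens a handle to this GEM object.  If the
 * client has a per-client VM, find its existing mapping of the buffer
 * and bump the refcount, or create and map a fresh VMA.
 */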
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->base.vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, cli->base.vm, vma);
		if (ret) {
			kfree(vma);
			goto out;
		}
	} else {
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

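/*
 * Fence-work callback: once the GPU is done with the buffer, unmap the
 * VMA and release it.
 */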
static void
nouveau_gem_object_delete(void *data)
{
	struct nouveau_vma *vma = data;
	nouveau_vm_unmap(vma);
	nouveau_vm_put(vma);
	kfree(vma);
}

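/*
 * Remove a client's VMA from the buffer.  If the buffer is currently
 * resident (not in system memory), defer the unmap until the buffer's
 * last fence has signalled; otherwise release the VMA immediately.
 */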
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
	struct nouveau_fence *fence = NULL;

	list_del(&vma->head);

	if (mapped) {
		spin_lock(&nvbo->bo.bdev->fence_lock);
		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
	}

	if (fence) {
		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
	} else {
		if (mapped)
			nouveau_vm_unmap(vma);
		nouveau_vm_put(vma);
		kfree(vma);
	}
	nouveau_fence_unref(&fence);
}

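/*
 * Called when a client closes its handle to this GEM object: drop the
 * client's VMA reference and tear the mapping down on last release.
 */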
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->base.vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
	if (vma) {
		if (--vma->refcount == 0)
			nouveau_gem_object_unmap(nvbo, vma);
	}
	ttm_bo_unreserve(&nvbo->bo);
}

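/*
 * Allocate a new buffer object with an embedded GEM object.  The caller
 * receives a single GEM reference rather than a raw TTM reference.
 */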
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
			     tile_flags, NULL, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (nv_device(drm->device)->card_type >= NV_50)
		nvbo->valid_domains &= domain;

	/* Initialize the embedded gem-object. We return a single gem-reference
	 * to the caller, instead of a normal nouveau_bo ttm reference. */
	ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
	if (ret) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

#ifndef __NetBSD__		/* XXX Let TTM swap; skip GEM like radeon. */
	nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
#endif
	return 0;
}

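/*
 * Fill out the userspace info record for a buffer: current placement
 * domain, GPU offset (the per-client virtual address when a VM is in
 * use), size, mmap handle, and tiling state.
 */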
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->offset = nvbo->bo.offset;
	if (cli->base.vm) {
		vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->offset;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}

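/*
 * DRM_NOUVEAU_GEM_NEW: allocate a buffer object and return a handle
 * plus its info record to userspace.
 */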
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
		NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&nvbo->gem);
	return ret;
}

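/*
 * Compute TTM placement flags for a pushbuf buffer from the domains
 * userspace asked for, preferring wherever the buffer already resides
 * to avoid needless migration.
 */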
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

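/*
 * Per-submission bookkeeping for buffer validation: reserved buffers
 * are kept on per-placement lists under one ww_mutex acquire context.
 */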
struct validate_op {
	struct list_head vram_list;
	struct list_head gart_list;
	struct list_head both_list;
	struct ww_acquire_ctx ticket;
};

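/*
 * Unwind one validation list: optionally attach the submission fence to
 * each buffer, drop any kmap taken for relocations, and unreserve.
 */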
static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
		   struct ww_acquire_ctx *ticket)
{
	struct list_head *entry, *tmp;
	struct nouveau_bo *nvbo;

	list_for_each_safe(entry, tmp, list) {
		nvbo = list_entry(entry, struct nouveau_bo, entry);

		if (likely(fence))
			nouveau_bo_fence(nvbo, fence);

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve_ticket(&nvbo->bo, ticket);
		drm_gem_object_unreference_unlocked(&nvbo->gem);
	}
}

static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_list(&op->vram_list, fence, &op->ticket);
	validate_fini_list(&op->gart_list, fence, &op->ticket);
	validate_fini_list(&op->both_list, fence, &op->ticket);
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_no_ticket(op, fence);
	ww_acquire_fini(&op->ticket);
}

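/*
 * Reserve every buffer named in the pushbuf submission, retrying via
 * the ww_mutex slowpath on deadlock, and sort the buffers onto the
 * per-placement validation lists.
 */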
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_device *dev = chan->drm->dev;
	int trycnt = 0;
	int ret, i;
	struct nouveau_bo *res_bo = NULL;

	ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
	if (++trycnt > 100000) {
		NV_ERROR(cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle);
			ww_acquire_done(&op->ticket);
			validate_fini(op, NULL);
			return -ENOENT;
		}
		nvbo = nouveau_gem_object(gem);
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_unreference_unlocked(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_ERROR(cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_unreference_unlocked(gem);
			ww_acquire_done(&op->ticket);
			validate_fini(op, NULL);
			return -EINVAL;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
		if (ret) {
			validate_fini_no_ticket(op, NULL);
			if (unlikely(ret == -EDEADLK)) {
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				ww_acquire_done(&op->ticket);
				ww_acquire_fini(&op->ticket);
				drm_gem_object_unreference_unlocked(gem);
				if (ret != -ERESTARTSYS)
					NV_ERROR(cli, "fail reserve\n");
				return ret;
			}
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &op->both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &op->vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &op->gart_list);
		else {
			NV_ERROR(cli, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &op->both_list);
			ww_acquire_done(&op->ticket);
			validate_fini(op, NULL);
			return -EINVAL;
		}
		if (nvbo == res_bo)
			goto retry;
	}

	ww_acquire_done(&op->ticket);
	return 0;
}

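/*
 * Make the channel wait for any fence still attached to the buffer
 * before the new submission may touch it.
 */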
static int
validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
{
	struct nouveau_fence *fence = NULL;
	int ret = 0;

	spin_lock(&nvbo->bo.bdev->fence_lock);
	fence = nouveau_fence_ref(nvbo->bo.sync_obj);
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	if (fence) {
		ret = nouveau_fence_sync(fence, chan);
		nouveau_fence_unref(&fence);
	}

	return ret;
}

#ifdef __NetBSD__		/* XXX yargleblargh */
#  define	__force
#endif

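/*
 * Validate each reserved buffer into an allowed placement and sync with
 * its previous user.  On pre-NV50 chips, report back any buffer whose
 * actual offset or domain no longer matches what userspace presumed, so
 * that relocations get applied.  Returns the number of such buffers, or
 * a negative error.
 */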
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      uint64_t user_pbbo_ptr)
{
	struct nouveau_drm *drm = chan->drm;
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_ERROR(cli, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_ERROR(cli, "fail ttm_validate\n");
			return ret;
		}

		ret = validate_sync(chan, nvbo);
		if (unlikely(ret)) {
			NV_ERROR(cli, "fail post-validate sync\n");
			return ret;
		}

		if (nv_device(drm->device)->card_type < NV_50) {
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;

			if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,
					 &b->presumed, sizeof(b->presumed)))
				return -EFAULT;
		}
	}

	return relocs;
}

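/*
 * Reserve and validate the full buffer list for a pushbuf submission,
 * accumulating the number of buffers that need relocations applied.
 */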
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret, relocs = 0;

	INIT_LIST_HEAD(&op->vram_list);
	INIT_LIST_HEAD(&op->gart_list);
	INIT_LIST_HEAD(&op->both_list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate vram_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, cli, &op->gart_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate gart_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate both_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	*apply_relocs = relocs;
	return 0;
}

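/*
 * Free a buffer allocated by u_memcpya(), whichever allocator backed it.
 */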
static inline void
u_free(void *addr)
{
	if (!is_vmalloc_addr(addr))
		kfree(addr);
	else
		vfree(addr);
}

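/*
 * Copy in a userspace array of nmemb elements of the given size, trying
 * kmalloc first and falling back to vmalloc for large requests.  The
 * ioctl entry points bound nmemb against the NOUVEAU_GEM_MAX_* limits
 * before calling here.
 */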
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	size *= nmemb;

	mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!mem)
		mem = vmalloc(size);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, userptr, size)) {
		u_free(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

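/*
 * Patch presumed-offset relocations into the pushbuf buffers: for each
 * reloc whose target buffer moved, compute the new value (low or high
 * word, optionally OR'd with a placement-dependent constant), wait for
 * the containing buffer to idle, and write it through a kernel mapping.
 */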
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index > req->nr_buffers)) {
			NV_ERROR(cli, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
			NV_ERROR(cli, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_ERROR(cli, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_ERROR(cli, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		spin_lock(&nvbo->bo.bdev->fence_lock);
		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
		if (ret) {
			NV_ERROR(cli, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	u_free(reloc);
	return ret;
}

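/*
 * DRM_NOUVEAU_GEM_PUSHBUF: the main command submission ioctl.  Copies
 * in the push/buffer/reloc arrays, validates and relocates the buffer
 * list, queues the push buffers to the channel (by IB entry, call, or
 * jump depending on chipset), and fences the submission.
 */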
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	if (unlikely(!abi16))
		return -ENOMEM;

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->handle == (NVDRM_CHAN | req->channel)) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_ERROR(cli, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_ERROR(cli, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_ERROR(cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		u_free(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_ERROR(cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
		if (ret) {
			NV_ERROR(cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_ERROR(cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (nv_device(drm->device)->chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_ERROR(cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_ERROR(cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.
							  num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, 0x20000000 |
				      (nvbo->bo.offset + push[i].offset));
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret) {
		NV_ERROR(cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence);
	nouveau_fence_unref(&fence);

out_prevalid:
	u_free(bo);
	u_free(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (nv_device(drm->device)->chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}

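/*
 * DRM_NOUVEAU_GEM_CPU_PREP: wait (or poll, with NOWAIT) for a buffer to
 * become idle before CPU access.
 */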
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	int ret = -EINVAL;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
	spin_unlock(&nvbo->bo.bdev->fence_lock);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}

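/*
 * DRM_NOUVEAU_GEM_CPU_FINI: a no-op; finishing CPU access requires no
 * explicit teardown here.
 */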
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	return 0;
}

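/*
 * DRM_NOUVEAU_GEM_INFO: look up a handle and return its info record.
 */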
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}
| 934 | |