/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

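/* Lock or unlock cursor register updates on a CRTC so that a group of
 * cursor register writes (position, size, surface address) takes effect
 * together rather than mid-update.
 */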
static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock)
{
	struct radeon_device *rdev = crtc->dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	uint32_t cur_lock;

	if (ASIC_IS_DCE4(rdev)) {
		cur_lock = RREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset);
		if (lock)
			cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK;
		else
			cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK;
		WREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
	} else if (ASIC_IS_AVIVO(rdev)) {
		cur_lock = RREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset);
		if (lock)
			cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK;
		else
			cur_lock &= ~AVIVO_D1CURSOR_UPDATE_LOCK;
		WREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
	} else {
		cur_lock = RREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset);
		if (lock)
			cur_lock |= RADEON_CUR_LOCK;
		else
			cur_lock &= ~RADEON_CUR_LOCK;
		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, cur_lock);
	}
}

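/* Disable the hardware cursor on the given CRTC. */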
static void radeon_hide_cursor(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct radeon_device *rdev = crtc->dev->dev_private;

	if (ASIC_IS_DCE4(rdev)) {
		WREG32_IDX(EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset,
			   EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
			   EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
	} else if (ASIC_IS_AVIVO(rdev)) {
		WREG32_IDX(AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset,
			   (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
	} else {
		u32 reg;
		switch (radeon_crtc->crtc_id) {
		case 0:
			reg = RADEON_CRTC_GEN_CNTL;
			break;
		case 1:
			reg = RADEON_CRTC2_GEN_CNTL;
			break;
		default:
			return;
		}
		WREG32_IDX(reg, RREG32_IDX(reg) & ~RADEON_CRTC_CUR_EN);
	}
}

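/* Enable the hardware cursor on the given CRTC, selecting the ARGB cursor
 * mode appropriate for the ASIC family (pre-multiplied 24/8 on DCE4+,
 * 24bpp otherwise).
 */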
static void radeon_show_cursor(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct radeon_device *rdev = crtc->dev->dev_private;

	if (ASIC_IS_DCE4(rdev)) {
		WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
		WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
		       EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
		       EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
	} else if (ASIC_IS_AVIVO(rdev)) {
		WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
		WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
		       (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
	} else {
		switch (radeon_crtc->crtc_id) {
		case 0:
			WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
			break;
		case 1:
			WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
			break;
		default:
			return;
		}

		WREG32_P(RADEON_MM_DATA, (RADEON_CRTC_CUR_EN |
					  (RADEON_CRTC_CUR_MODE_24BPP << RADEON_CRTC_CUR_MODE_SHIFT)),
			 ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_CUR_MODE_MASK));
	}
}

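/* Program the cursor surface address for the CRTC.  AVIVO and newer parts
 * take a GPU address (with a separate high-bits register on RV770 and up);
 * legacy CRTCs take an offset relative to the display base address.
 */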
static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
			      uint64_t gpu_addr)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct radeon_device *rdev = crtc->dev->dev_private;

	if (ASIC_IS_DCE4(rdev)) {
		WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
		       upper_32_bits(gpu_addr));
		WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
		       gpu_addr & 0xffffffff);
	} else if (ASIC_IS_AVIVO(rdev)) {
		if (rdev->family >= CHIP_RV770) {
			if (radeon_crtc->crtc_id)
				WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
			else
				WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
		}
		WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
		       gpu_addr & 0xffffffff);
	} else {
		radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
		/* offset is from DISP(2)_BASE_ADDRESS */
		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
	}
}

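/* Set a new cursor image for the CRTC from the GEM object named by handle,
 * or hide the cursor when handle is zero.  The new BO is pinned in VRAM and
 * any previously installed cursor BO is unpinned and released.
 */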
int radeon_crtc_cursor_set(struct drm_crtc *crtc,
			   struct drm_file *file_priv,
			   uint32_t handle,
			   uint32_t width,
			   uint32_t height)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct radeon_device *rdev = crtc->dev->dev_private;
	struct drm_gem_object *obj;
	struct radeon_bo *robj;
	uint64_t gpu_addr;
	int ret;

	if (!handle) {
		/* turn off cursor */
		radeon_hide_cursor(crtc);
		obj = NULL;
		goto unpin;
	}

	if ((width > radeon_crtc->max_cursor_width) ||
	    (height > radeon_crtc->max_cursor_height)) {
		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
		return -ENOENT;
	}

	robj = gem_to_radeon_bo(obj);
	ret = radeon_bo_reserve(robj, false);
	if (unlikely(ret != 0))
		goto fail;
	/* Only 27 bit offset for legacy cursor */
	ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
				       ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
				       &gpu_addr);
	radeon_bo_unreserve(robj);
	if (ret)
		goto fail;

	radeon_crtc->cursor_width = width;
	radeon_crtc->cursor_height = height;

	radeon_lock_cursor(crtc, true);
	radeon_set_cursor(crtc, obj, gpu_addr);
	radeon_show_cursor(crtc);
	radeon_lock_cursor(crtc, false);

unpin:
	if (radeon_crtc->cursor_bo) {
		robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
		ret = radeon_bo_reserve(robj, false);
		if (likely(ret == 0)) {
			radeon_bo_unpin(robj);
			radeon_bo_unreserve(robj);
		}
		drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
	}

	radeon_crtc->cursor_bo = obj;
	return 0;
fail:
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

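/* Move the cursor to (x, y) in CRTC coordinates.  Negative positions are
 * clamped to zero and expressed through the hot spot registers, and on
 * AVIVO parts prior to DCE6 the cursor width is trimmed so the image never
 * ends exactly on a 128-pixel boundary while multiple CRTCs are enabled.
 */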
int radeon_crtc_cursor_move(struct drm_crtc *crtc,
			    int x, int y)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct radeon_device *rdev = crtc->dev->dev_private;
	int xorigin = 0, yorigin = 0;
	int w = radeon_crtc->cursor_width;

	if (ASIC_IS_AVIVO(rdev)) {
		/* avivo cursor are offset into the total surface */
		x += crtc->x;
		y += crtc->y;
	}
	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);

	if (x < 0) {
		xorigin = min(-x, radeon_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, radeon_crtc->max_cursor_height - 1);
		y = 0;
	}

	/* fixed on DCE6 and newer */
	if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE6(rdev)) {
		int i = 0;
		struct drm_crtc *crtc_p;

		/*
		 * avivo cursor image can't end on 128 pixel boundary or
		 * go past the end of the frame if both crtcs are enabled
		 *
		 * NOTE: It is safe to access crtc->enabled of other crtcs
		 * without holding either the mode_config lock or the other
		 * crtc's lock as long as write access to this flag _always_
		 * grabs all locks.
		 */
		list_for_each_entry(crtc_p, &crtc->dev->mode_config.crtc_list, head) {
			if (crtc_p->enabled)
				i++;
		}
		if (i > 1) {
			int cursor_end, frame_end;

			cursor_end = x - xorigin + w;
			frame_end = crtc->x + crtc->mode.crtc_hdisplay;
			if (cursor_end >= frame_end) {
				w = w - (cursor_end - frame_end);
				if (!(frame_end & 0x7f))
					w--;
			} else {
				if (!(cursor_end & 0x7f))
					w--;
			}
			if (w <= 0) {
				w = 1;
				cursor_end = x - xorigin + w;
				if (!(cursor_end & 0x7f)) {
					x--;
					WARN_ON_ONCE(x < 0);
				}
			}
		}
	}

	radeon_lock_cursor(crtc, true);
	if (ASIC_IS_DCE4(rdev)) {
		WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
		WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
		WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
		       ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
	} else if (ASIC_IS_AVIVO(rdev)) {
		WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
		WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
		WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
		       ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
	} else {
		if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
			y *= 2;

		WREG32(RADEON_CUR_HORZ_VERT_OFF + radeon_crtc->crtc_offset,
		       (RADEON_CUR_LOCK
			| (xorigin << 16)
			| yorigin));
		WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,
		       (RADEON_CUR_LOCK
			| (x << 16)
			| y));
		/* offset is from DISP(2)_BASE_ADDRESS */
		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
								      (yorigin * 256)));
	}
	radeon_lock_cursor(crtc, false);

	return 0;
}