/*	$NetBSD: linux_kmap.c,v 1.12 2015/01/01 01:15:43 mrg Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_kmap.c,v 1.12 2015/01/01 01:15:43 mrg Exp $");

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rbtree.h>

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
#include <dev/mm.h>
#endif

#include <uvm/uvm_extern.h>

#include <linux/highmem.h>
/*
 * XXX Kludgerific implementation of Linux kmap_atomic, which is
 * required not to fail.  To accommodate this, we reserve one page of
 * kva at boot (or load) and limit the system to at most one
 * kmap_atomic in use at a time.
 */
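
/*
 * Illustrative usage (buf and len here are hypothetical, not part of
 * this file):
 *
 *	void *kva = kmap_atomic(page);
 *	memcpy(buf, kva, len);
 *	kunmap_atomic(kva);
 */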

static kmutex_t linux_kmap_atomic_lock;
static vaddr_t linux_kmap_atomic_vaddr;

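/*
 * Bookkeeping for sleepable kmap mappings: a lock and a red-black tree
 * of linux_kmap_entry records keyed by physical address.
 */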
static kmutex_t linux_kmap_lock;
static rb_tree_t linux_kmap_entries;

struct linux_kmap_entry {
	paddr_t lke_paddr;
	vaddr_t lke_vaddr;
	unsigned int lke_refcnt;
	rb_node_t lke_node;
};

static int
lke_compare_nodes(void *ctx __unused, const void *an, const void *bn)
{
	const struct linux_kmap_entry *const a = an;
	const struct linux_kmap_entry *const b = bn;

	if (a->lke_paddr < b->lke_paddr)
		return -1;
	else if (a->lke_paddr > b->lke_paddr)
		return +1;
	else
		return 0;
}

static int
lke_compare_key(void *ctx __unused, const void *node, const void *key)
{
	const struct linux_kmap_entry *const lke = node;
	const paddr_t *const paddrp = key;

	if (lke->lke_paddr < *paddrp)
		return -1;
	else if (lke->lke_paddr > *paddrp)
		return +1;
	else
		return 0;
}

static const rb_tree_ops_t linux_kmap_entry_ops = {
	.rbto_compare_nodes = &lke_compare_nodes,
	.rbto_compare_key = &lke_compare_key,
	.rbto_node_offset = offsetof(struct linux_kmap_entry, lke_node),
	.rbto_context = NULL,
};

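/*
 * linux_kmap_init()
 *
 *	Initialize the kmap machinery: reserve one page of KVA for
 *	kmap_atomic and set up the lock and tree used to track kmap
 *	mappings.  Returns 0 on success.
 */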
int
linux_kmap_init(void)
{

	/* IPL_VM since interrupt handlers use kmap_atomic. */
	mutex_init(&linux_kmap_atomic_lock, MUTEX_DEFAULT, IPL_VM);

	linux_kmap_atomic_vaddr = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    (UVM_KMF_VAONLY | UVM_KMF_WAITVA));

	KASSERT(linux_kmap_atomic_vaddr != 0);
	KASSERT(!pmap_extract(pmap_kernel(), linux_kmap_atomic_vaddr, NULL));

	mutex_init(&linux_kmap_lock, MUTEX_DEFAULT, IPL_NONE);
	rb_tree_init(&linux_kmap_entries, &linux_kmap_entry_ops);

	return 0;
}

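/*
 * linux_kmap_fini()
 *
 *	Tear down the state created by linux_kmap_init.  All kmap and
 *	kmap_atomic mappings must have been released first.
 */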
void
linux_kmap_fini(void)
{

	KASSERT(RB_TREE_MIN(&linux_kmap_entries) == NULL);
#if 0 /* XXX no rb_tree_destroy */
	rb_tree_destroy(&linux_kmap_entries);
#endif
	mutex_destroy(&linux_kmap_lock);

	KASSERT(linux_kmap_atomic_vaddr != 0);
	KASSERT(!pmap_extract(pmap_kernel(), linux_kmap_atomic_vaddr, NULL));

	uvm_km_free(kernel_map, linux_kmap_atomic_vaddr, PAGE_SIZE,
	    UVM_KMF_VAONLY);

	mutex_destroy(&linux_kmap_atomic_lock);
}

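/*
 * kmap_atomic(page)
 *
 *	Return a kernel virtual address mapping page.  Never fails and
 *	may be used from interrupt handlers at or below IPL_VM.  Unless
 *	the page is covered by a machine-dependent direct map, this
 *	takes a spin lock that is held until the matching
 *	kunmap_atomic, so at most one such mapping can be outstanding
 *	at a time and the caller must not sleep in between.
 */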
void *
kmap_atomic(struct page *page)
{
	const paddr_t paddr = VM_PAGE_TO_PHYS(&page->p_vmp);
	vaddr_t vaddr;

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	if (mm_md_direct_mapped_phys(paddr, &vaddr))
		return (void *)vaddr;
#endif

	mutex_spin_enter(&linux_kmap_atomic_lock);
	KASSERT(linux_kmap_atomic_vaddr != 0);
	KASSERT(!pmap_extract(pmap_kernel(), linux_kmap_atomic_vaddr, NULL));
	vaddr = linux_kmap_atomic_vaddr;
	pmap_kenter_pa(vaddr, paddr, (VM_PROT_READ | VM_PROT_WRITE), 0);
	pmap_update(pmap_kernel());

	return (void *)vaddr;
}

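/*
 * kunmap_atomic(addr)
 *
 *	Release the mapping at addr returned by kmap_atomic.  A no-op
 *	for direct-mapped addresses; otherwise removes the temporary
 *	mapping and releases the spin lock taken by kmap_atomic.
 */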
void
kunmap_atomic(void *addr)
{
	const vaddr_t vaddr = (vaddr_t)addr;

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	{
		paddr_t paddr;
		vaddr_t vaddr1;
		bool ok __diagused;

		ok = pmap_extract(pmap_kernel(), vaddr, &paddr);
		KASSERT(ok);
		if (mm_md_direct_mapped_phys(paddr, &vaddr1) && vaddr1 == vaddr)
			return;
	}
#endif

	KASSERT(mutex_owned(&linux_kmap_atomic_lock));
	KASSERT(linux_kmap_atomic_vaddr == vaddr);
	KASSERT(pmap_extract(pmap_kernel(), vaddr, NULL));

	pmap_kremove(vaddr, PAGE_SIZE);
	pmap_update(pmap_kernel());

	mutex_spin_exit(&linux_kmap_atomic_lock);
}

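/*
 * kmap(page)
 *
 *	Return a kernel virtual address mapping page, allocating fresh
 *	KVA if the page is not in the direct map.  May sleep.  The
 *	mapping is recorded in a tree keyed by physical address and
 *	must be released with kunmap(page).
 */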
void *
kmap(struct page *page)
{
	const paddr_t paddr = VM_PAGE_TO_PHYS(&page->p_vmp);
	vaddr_t vaddr;

	ASSERT_SLEEPABLE();

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	if (mm_md_direct_mapped_phys(paddr, &vaddr))
		return (void *)vaddr;
#endif

	vaddr = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    (UVM_KMF_VAONLY | UVM_KMF_WAITVA));
	KASSERT(vaddr != 0);

	struct linux_kmap_entry *const lke = kmem_alloc(sizeof(*lke),
	    KM_SLEEP);
	lke->lke_paddr = paddr;
	lke->lke_vaddr = vaddr;

	mutex_enter(&linux_kmap_lock);
	struct linux_kmap_entry *const collision __diagused =
	    rb_tree_insert_node(&linux_kmap_entries, lke);
	KASSERT(collision == lke);
	mutex_exit(&linux_kmap_lock);

	KASSERT(!pmap_extract(pmap_kernel(), vaddr, NULL));
	pmap_kenter_pa(vaddr, paddr, (VM_PROT_READ | VM_PROT_WRITE), 0);
	pmap_update(pmap_kernel());

	return (void *)vaddr;
}

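/*
 * kunmap(page)
 *
 *	Release the mapping of page established by kmap and free its
 *	KVA.  May sleep.
 */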
void
kunmap(struct page *page)
{
	const paddr_t paddr = VM_PAGE_TO_PHYS(&page->p_vmp);

	ASSERT_SLEEPABLE();

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	{
		vaddr_t vaddr1;

		if (mm_md_direct_mapped_phys(paddr, &vaddr1))
			return;
	}
#endif

	mutex_enter(&linux_kmap_lock);
	struct linux_kmap_entry *const lke =
	    rb_tree_find_node(&linux_kmap_entries, &paddr);
	KASSERT(lke != NULL);
	rb_tree_remove_node(&linux_kmap_entries, lke);
	mutex_exit(&linux_kmap_lock);

	const vaddr_t vaddr = lke->lke_vaddr;
	kmem_free(lke, sizeof(*lke));

	KASSERT(pmap_extract(pmap_kernel(), vaddr, NULL));

	pmap_kremove(vaddr, PAGE_SIZE);
	pmap_update(pmap_kernel());

	uvm_km_free(kernel_map, vaddr, PAGE_SIZE, UVM_KMF_VAONLY);
}