/*	$NetBSD: uvm_mremap.c,v 1.18 2015/11/26 13:15:34 martin Exp $	*/

/*-
 * Copyright (c)2006,2007,2009 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_mremap.c,v 1.18 2015/11/26 13:15:34 martin Exp $");

#include <sys/param.h>
#include <sys/mman.h>
#include <sys/sched.h>
#include <sys/syscallargs.h>
#include <sys/proc.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>

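/*
 * uvm_mapent_extend: grow the mapping that ends at "endva" into the
 * adjacent [endva, endva + size) region previously set aside with
 * uvm_map_reserve().  The reserved entry must still be an unbacked,
 * VM_PROT_NONE placeholder; it is made compatible with the preceding
 * entry (object, offset, protection, inheritance, advice) and then we
 * try to merge the two.  Returns 0 on success or an errno value.
 */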
static int
uvm_mapent_extend(struct vm_map *map, vaddr_t endva, vsize_t size)
{
	struct vm_map_entry *entry;
	struct vm_map_entry *reserved_entry;
	struct uvm_object *uobj;
	int error = 0;

	vm_map_lock(map);
	if (!uvm_map_lookup_entry(map, endva, &reserved_entry)) {
		error = ENOENT;
		goto done;
	}
	if (reserved_entry->start != endva ||
	    reserved_entry->end != endva + size ||
	    reserved_entry->object.uvm_obj != NULL ||
	    reserved_entry->aref.ar_amap != NULL ||
	    reserved_entry->protection != VM_PROT_NONE) {
		error = EINVAL;
		goto done;
	}
	entry = reserved_entry->prev;
	if (&map->header == entry || entry->end != endva) {
		error = EINVAL;
		goto done;
	}

	/*
	 * now, make reserved_entry compatible with entry, and then
	 * try to merge.
	 */

	uobj = entry->object.uvm_obj;
	if (uobj) {
		voff_t offset = entry->offset;
		voff_t newoffset;

		newoffset = offset + entry->end - entry->start;
		if (newoffset <= offset) {
			error = E2BIG; /* XXX */
			goto done;
		}
		mutex_enter(uobj->vmobjlock);
		KASSERT(uobj->uo_refs > 0);
		atomic_inc_uint(&uobj->uo_refs);
		mutex_exit(uobj->vmobjlock);
		reserved_entry->object.uvm_obj = uobj;
		reserved_entry->offset = newoffset;
	}
	reserved_entry->etype = entry->etype;
	if (UVM_ET_ISCOPYONWRITE(entry)) {
		reserved_entry->etype |= UVM_ET_NEEDSCOPY;
	}
	reserved_entry->flags &= ~UVM_MAP_NOMERGE;
	reserved_entry->protection = entry->protection;
	reserved_entry->max_protection = entry->max_protection;
	reserved_entry->inheritance = entry->inheritance;
	reserved_entry->advice = entry->advice;
	reserved_entry->wired_count = 0; /* XXX should inherit? */
	uvm_mapent_trymerge(map, reserved_entry, 0);
done:
	vm_map_unlock(map);

	return error;
}

/*
 * uvm_mremap: move and/or resize existing mappings.
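 *
 * The mapping [oldva, oldva + oldsize) in "oldmap" is resized and/or
 * relocated into "newmap" with the new size "newsize".  If MAP_FIXED is
 * set in "flags", *newvap names the exact destination address; otherwise
 * a suitable address is chosen (via "newproc"'s default-address hook)
 * and returned in *newvap.  MAP_ALIGNED() requests may also be passed
 * in "flags".  Returns 0 on success, with *newvap updated.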
 */

int
uvm_mremap(struct vm_map *oldmap, vaddr_t oldva, vsize_t oldsize,
    struct vm_map *newmap, vaddr_t *newvap, vsize_t newsize,
    struct proc *newproc, int flags)
{
	vaddr_t dstva;
	vsize_t movesize;
	vaddr_t newva;
	int alignshift;
	vaddr_t align = 0;
	int error = 0;
	const bool fixed = (flags & MAP_FIXED) != 0;

	if (fixed) {
		newva = *newvap;
	} else {
		newva = 0;
	}
	if ((oldva & PAGE_MASK) != 0 ||
	    (newva & PAGE_MASK) != 0 ||
	    (oldsize & PAGE_MASK) != 0 ||
	    (newsize & PAGE_MASK) != 0) {
		return EINVAL;
	}
	/* XXX zero-size should be allowed? */
	if (oldva + oldsize <= oldva || newva + newsize <= newva) {
		return EINVAL;
	}

	/*
	 * Try to see if any requested alignment can even be attempted.
	 * Make sure we can express the alignment (asking for a >= 4GB
	 * alignment on an ILP32 architecture makes no sense) and that
	 * the alignment is at least a page-sized quantity.  If the
	 * request was for a fixed mapping, make sure the supplied
	 * address adheres to the requested alignment.
	 */
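	/* e.g. MAP_ALIGNED(16) in flags requests 64KB (1 << 16) alignment. */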
	alignshift = (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT;
	if (alignshift != 0) {
		if (alignshift >= sizeof(vaddr_t) * NBBY)
			return EINVAL;
		align = 1L << alignshift;
		if (align < PAGE_SIZE)
			return EINVAL;
		if (align >= vm_map_max(oldmap))
			return ENOMEM;
		if ((flags & MAP_FIXED) != 0) {
			if ((*newvap & (align - 1)) != 0)
				return EINVAL;
			align = 0;
		}
	}

	/*
	 * check the easy cases first: staying in the same map at the
	 * current address (with any requested alignment already
	 * satisfied), where we can shrink, keep, or grow the mapping
	 * in place.
	 */

	if ((!fixed || newva == oldva) && newmap == oldmap &&
	    (align == 0 || (oldva & (align - 1)) == 0)) {
		vaddr_t va;

		if (newsize == oldsize) {
			newva = oldva;
			goto done;
		}
		if (newsize < oldsize) {
			uvm_unmap(oldmap, oldva + newsize, oldva + oldsize);
			newva = oldva;
			goto done;
		}
		va = oldva + oldsize;
		if (uvm_map_reserve(oldmap, newsize - oldsize, 0, 0, &va,
		    UVM_FLAG_FIXED)) {
			newva = oldva;
			goto extend;
		}
		if (fixed) {
			return ENOMEM;
		}
	}

	/*
	 * we need to move mappings: reserve space in the new map,
	 * extract the existing entries into it, and extend the tail
	 * if the mapping is growing.
	 */

	if (!fixed) {
		KASSERT(&newproc->p_vmspace->vm_map == newmap);
		newva = newproc->p_emul->e_vm_default_addr(newproc,
		    (vaddr_t)newproc->p_vmspace->vm_daddr, newsize,
		    newproc->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);
	}
	dstva = newva;
	if (!uvm_map_reserve(newmap, newsize, oldva, align, &dstva,
	    fixed ? UVM_FLAG_FIXED : 0)) {
		return ENOMEM;
	}
	KASSERT(!fixed || dstva == newva);
	newva = dstva;
	movesize = MIN(oldsize, newsize);
	error = uvm_map_extract(oldmap, oldva, movesize, newmap, &dstva,
	    UVM_EXTRACT_RESERVED);
	KASSERT(dstva == newva);
	if (error != 0) {
		/*
		 * undo uvm_map_reserve.
		 */
		uvm_unmap(newmap, newva, newva + newsize);
		return error;
	}
	if (newsize > oldsize) {
extend:
		error = uvm_mapent_extend(newmap, newva + oldsize,
		    newsize - oldsize);
		if (error != 0) {
			/*
			 * undo uvm_map_reserve and uvm_map_extract.
			 */
			if (newva == oldva && newmap == oldmap) {
				uvm_unmap(newmap, newva + oldsize,
				    newva + newsize);
			} else {
				uvm_unmap(newmap, newva, newva + newsize);
			}
			return error;
		}
	}

	/*
	 * from this point on we cannot fail.
	 * remove the original entries unless we extended in place.
	 */

	if (oldva != newva || oldmap != newmap) {
		uvm_unmap(oldmap, oldva, oldva + oldsize);
	}
done:
	*newvap = newva;
	return 0;
}

/*
 * sys_mremap: mremap system call.
 */
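
/*
 * Illustrative userland usage (a sketch, not part of this file; it
 * assumes the NetBSD mremap(2) prototype "void *mremap(void *oldp,
 * size_t oldsize, void *newp, size_t newsize, int flags)"): grow an
 * anonymous mapping, letting the kernel pick the new address.
 *
 *	void *p = mmap(NULL, 64 * 1024, PROT_READ | PROT_WRITE,
 *	    MAP_ANON | MAP_PRIVATE, -1, 0);
 *	void *q = mremap(p, 64 * 1024, NULL, 128 * 1024, 0);
 *	if (q == MAP_FAILED)
 *		err(EXIT_FAILURE, "mremap");
 */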

int
sys_mremap(struct lwp *l, const struct sys_mremap_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) old_address;
		syscallarg(size_t) old_size;
		syscallarg(void *) new_address;
		syscallarg(size_t) new_size;
		syscallarg(int) flags;
	} */

	struct proc *p;
	struct vm_map *map;
	vaddr_t oldva;
	vaddr_t newva;
	size_t oldsize;
	size_t newsize;
	int flags;
	int error;

	flags = SCARG(uap, flags);
	oldva = (vaddr_t)SCARG(uap, old_address);
	oldsize = (vsize_t)(SCARG(uap, old_size));
	newva = (vaddr_t)SCARG(uap, new_address);
	newsize = (vsize_t)(SCARG(uap, new_size));

	if ((flags & ~(MAP_FIXED | MAP_ALIGNMENT_MASK)) != 0) {
		error = EINVAL;
		goto done;
	}

	oldsize = round_page(oldsize);
	newsize = round_page(newsize);

	p = l->l_proc;
	map = &p->p_vmspace->vm_map;
	error = uvm_mremap(map, oldva, oldsize, map, &newva, newsize, p, flags);

done:
	*retval = (error != 0) ? 0 : (register_t)newva;
	return error;
}