/*	$NetBSD: uvm_map.h,v 1.73 2016/05/25 17:43:58 christos Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h    8.3 (Berkeley) 3/15/94
 * from: Id: uvm_map.h,v 1.1.2.3 1998/02/07 01:16:55 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_MAP_H_
#define _UVM_UVM_MAP_H_

/*
 * uvm_map.h
 */

#ifdef _KERNEL

/*
 * macros
 */

/*
 * UVM_MAP_CLIP_START: ensure that the entry begins at or after
 * the starting address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_START(MAP,ENTRY,VA) { \
	if ((VA) > (ENTRY)->start && (VA) < (ENTRY)->end) { \
		uvm_map_clip_start(MAP,ENTRY,VA); \
	} \
}

/*
 * UVM_MAP_CLIP_END: ensure that the entry ends at or before
 * the ending address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_END(MAP,ENTRY,VA) { \
	if ((VA) > (ENTRY)->start && (VA) < (ENTRY)->end) { \
		uvm_map_clip_end(MAP,ENTRY,VA); \
	} \
}
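
/*
 * Illustrative sketch only (not part of this header's API): a range
 * operation typically clips the first and last entries so they line up
 * exactly with the requested [start, end) range before modifying them.
 * The helper name "example_op_range" is hypothetical, and error handling
 * and the case where `start' falls in a gap are elided.
 *
 *	static void
 *	example_op_range(struct vm_map *map, vaddr_t start, vaddr_t end)
 *	{
 *		struct vm_map_entry *entry;
 *
 *		vm_map_lock(map);
 *		if (uvm_map_lookup_entry(map, start, &entry)) {
 *			UVM_MAP_CLIP_START(map, entry, start);
 *			for (; entry != &map->header && entry->start < end;
 *			    entry = entry->next) {
 *				UVM_MAP_CLIP_END(map, entry, end);
 *				... operate on [entry->start, entry->end) ...
 *			}
 *		}
 *		vm_map_unlock(map);
 *	}
 */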

/*
 * extract flags
 */
#define UVM_EXTRACT_REMOVE	0x01	/* remove mapping from old map */
#define UVM_EXTRACT_CONTIG	0x02	/* try to keep it contig */
#define UVM_EXTRACT_QREF	0x04	/* use quick refs */
#define UVM_EXTRACT_FIXPROT	0x08	/* set prot to maxprot as we go */
#define UVM_EXTRACT_RESERVED	0x10	/* caller did uvm_map_reserve() */
#define UVM_EXTRACT_PROT_ALL	0x20	/* set prot to UVM_PROT_ALL */

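/*
 * Illustrative sketch only: the UVM_EXTRACT_* values are or'ed together
 * and passed as the final argument to uvm_map_extract() (declared below).
 * The maps, addresses, and length here are placeholders.
 *
 *	error = uvm_map_extract(srcmap, srcaddr, len, dstmap, &dstaddr,
 *	    UVM_EXTRACT_REMOVE | UVM_EXTRACT_CONTIG);
 */
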
#endif /* _KERNEL */

#include <sys/rbtree.h>
#include <sys/pool.h>
#include <sys/rwlock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>

#include <uvm/uvm_anon.h>

/*
 * Address map entries consist of start and end addresses,
 * a VM object (or sharing map) and offset into that object,
 * and user-exported inheritance and protection information.
 * Also included is control information for virtual copy operations.
 */
struct vm_map_entry {
	struct rb_node		rb_node;	/* tree information */
	vsize_t			gap;		/* free space after */
	vsize_t			maxgap;		/* space in subtree */
	struct vm_map_entry	*prev;		/* previous entry */
	struct vm_map_entry	*next;		/* next entry */
	vaddr_t			start;		/* start address */
	vaddr_t			end;		/* end address */
	union {
		struct uvm_object *uvm_obj;	/* uvm object */
		struct vm_map	*sub_map;	/* belongs to another map */
	} object;				/* object I point to */
	voff_t			offset;		/* offset into object */
	int			etype;		/* entry type */
	vm_prot_t		protection;	/* protection code */
	vm_prot_t		max_protection;	/* maximum protection */
	vm_inherit_t		inheritance;	/* inheritance */
	int			wired_count;	/* can be paged if == 0 */
	struct vm_aref		aref;		/* anonymous overlay */
	int			advice;		/* madvise advice */
	uint32_t		map_attrib;	/* uvm-external map attributes */
#define uvm_map_entry_stop_copy flags
	u_int8_t		flags;		/* flags */

#define UVM_MAP_KERNEL		0x01		/* kernel map entry */
#define UVM_MAP_STATIC		0x04		/* special static entries */
#define UVM_MAP_NOMERGE		0x08		/* this entry is not mergable */

};

#define VM_MAPENT_ISWIRED(entry)	((entry)->wired_count != 0)
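
/*
 * Illustrative sketch only: entries are kept on an address-sorted,
 * circular list headed by the map's `header' entry (see struct vm_map
 * below), so a read-locked walk over all entries looks roughly like the
 * following.  The helper name "example_wired_size" is hypothetical.
 *
 *	static vsize_t
 *	example_wired_size(struct vm_map *map)
 *	{
 *		struct vm_map_entry *entry;
 *		vsize_t sz = 0;
 *
 *		vm_map_lock_read(map);
 *		for (entry = map->header.next; entry != &map->header;
 *		    entry = entry->next) {
 *			if (VM_MAPENT_ISWIRED(entry))
 *				sz += entry->end - entry->start;
 *		}
 *		vm_map_unlock_read(map);
 *		return sz;
 *	}
 */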

/*
 * Maps are doubly-linked lists of map entries, kept sorted
 * by address.  A single hint is provided to start
 * searches again from the last successful search,
 * insertion, or removal.
 *
 * LOCKING PROTOCOL NOTES:
 * -----------------------
 *
 * VM map locking is a little complicated.  There are both shared
 * and exclusive locks on maps.  However, it is sometimes required
 * to downgrade an exclusive lock to a shared lock, and upgrade to
 * an exclusive lock again (to perform error recovery).  However,
 * another thread *must not* queue itself to receive an exclusive
 * lock before we upgrade back to exclusive, otherwise the
 * error recovery becomes extremely difficult, if not impossible.
 *
 * In order to prevent this scenario, we introduce the notion of
 * a `busy' map.  A `busy' map is read-locked, but other threads
 * attempting to write-lock wait for this flag to clear before
 * entering the lock manager.  A map may only be marked busy
 * when the map is write-locked (and then the map must be downgraded
 * to read-locked), and may only be marked unbusy by the thread
 * which marked it busy (holding *either* a read-lock or a
 * write-lock, the latter being gained by an upgrade).
 *
 * Access to the map `flags' member is controlled by the `flags_lock'
 * simple lock.  Note that some flags are static (set once at map
 * creation time, and never changed), and thus require no locking
 * to check them.  All flags which are r/w must be set or
 * cleared while the `flags_lock' is asserted.  Additional locking
 * requirements are:
 *
 *	VM_MAP_PAGEABLE		r/o static flag; no locking required
 *
 *	VM_MAP_WIREFUTURE	r/w; may only be set or cleared when
 *				map is write-locked.  may be tested
 *				without asserting `flags_lock'.
 *
 *	VM_MAP_DYING		r/o; set when a vmspace is being
 *				destroyed to indicate that updates
 *				to the pmap can be skipped.
 *
 *	VM_MAP_TOPDOWN		r/o; set when the vmspace is
 *				created if the unspecified map
 *				allocations are to be arranged in
 *				a "top down" manner.
 */
struct vm_map {
	struct pmap *		pmap;		/* Physical map */
	krwlock_t		lock;		/* Non-intrsafe lock */
	struct lwp *		busy;		/* LWP holding map busy */
	kmutex_t		misc_lock;	/* Lock for ref_count, cv */
	kcondvar_t		cv;		/* For signalling */
	int			flags;		/* flags */
	struct rb_tree		rb_tree;	/* Tree for entries */
	struct vm_map_entry	header;		/* List of entries */
	int			nentries;	/* Number of entries */
	vsize_t			size;		/* virtual size */
	int			ref_count;	/* Reference count */
	struct vm_map_entry *	hint;		/* hint for quick lookups */
	struct vm_map_entry *	first_free;	/* First free space hint */
	unsigned int		timestamp;	/* Version number */
};

#if defined(_KERNEL)

#include <sys/callback.h>

#endif /* defined(_KERNEL) */

#define VM_MAP_IS_KERNEL(map)	(vm_map_pmap(map) == pmap_kernel())

/* vm_map flags */
#define VM_MAP_PAGEABLE		0x01	/* ro: entries are pageable */
#define VM_MAP_WIREFUTURE	0x04	/* rw: wire future mappings */
#define VM_MAP_DYING		0x20	/* rw: map is being destroyed */
#define VM_MAP_TOPDOWN		0x40	/* ro: arrange map top-down */
#define VM_MAP_WANTVA		0x100	/* rw: want va */
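
/*
 * Illustrative sketch only: per the locking notes above, the r/o flags
 * are set at map creation time and never change, so they may be tested
 * without any locking, e.g.
 *
 *	const bool topdown = (map->flags & VM_MAP_TOPDOWN) != 0;
 */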

#ifdef _KERNEL
struct uvm_map_args {
	struct vm_map_entry *uma_prev;

	vaddr_t uma_start;
	vsize_t uma_size;

	struct uvm_object *uma_uobj;
	voff_t uma_uoffset;

	uvm_flag_t uma_flags;
};
#endif /* _KERNEL */

/*
 * globals:
 */

#ifdef _KERNEL

#include <sys/proc.h>

#ifdef PMAP_GROWKERNEL
extern vaddr_t uvm_maxkaddr;
#endif

/*
 * protos: the following prototypes define the interface to vm_map
 */

void	uvm_map_deallocate(struct vm_map *);

int	uvm_map_willneed(struct vm_map *, vaddr_t, vaddr_t);
int	uvm_map_clean(struct vm_map *, vaddr_t, vaddr_t, int);
void	uvm_map_clip_start(struct vm_map *, struct vm_map_entry *,
	    vaddr_t);
void	uvm_map_clip_end(struct vm_map *, struct vm_map_entry *,
	    vaddr_t);
int	uvm_map_extract(struct vm_map *, vaddr_t, vsize_t,
	    struct vm_map *, vaddr_t *, int);
struct vm_map_entry *
	uvm_map_findspace(struct vm_map *, vaddr_t, vsize_t,
	    vaddr_t *, struct uvm_object *, voff_t, vsize_t, int);
int	uvm_map_inherit(struct vm_map *, vaddr_t, vaddr_t,
	    vm_inherit_t);
int	uvm_map_advice(struct vm_map *, vaddr_t, vaddr_t, int);
void	uvm_map_init(void);
void	uvm_map_init_caches(void);
bool	uvm_map_lookup_entry(struct vm_map *, vaddr_t,
	    struct vm_map_entry **);
void	uvm_map_reference(struct vm_map *);
int	uvm_map_reserve(struct vm_map *, vsize_t, vaddr_t, vsize_t,
	    vaddr_t *, uvm_flag_t);
void	uvm_map_setup(struct vm_map *, vaddr_t, vaddr_t, int);
int	uvm_map_submap(struct vm_map *, vaddr_t, vaddr_t,
	    struct vm_map *);
void	uvm_unmap1(struct vm_map *, vaddr_t, vaddr_t, int);
#define	uvm_unmap(map, s, e)	uvm_unmap1((map), (s), (e), 0)
void	uvm_unmap_detach(struct vm_map_entry *, int);
void	uvm_unmap_remove(struct vm_map *, vaddr_t, vaddr_t,
	    struct vm_map_entry **, int);

int	uvm_map_prepare(struct vm_map *, vaddr_t, vsize_t,
	    struct uvm_object *, voff_t, vsize_t, uvm_flag_t,
	    struct uvm_map_args *);
int	uvm_map_enter(struct vm_map *, const struct uvm_map_args *,
	    struct vm_map_entry *);
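
/*
 * Illustrative sketch only: the two-phase interface above lets a caller
 * separate argument preparation from the final insertion, for example to
 * pre-allocate the new map entry.  Roughly (error handling elided; the
 * uobj, uoffset, align, flags, and new_entry names are placeholders):
 *
 *	struct uvm_map_args args;
 *	int error;
 *
 *	error = uvm_map_prepare(map, start, size, uobj, uoffset, align,
 *	    flags, &args);
 *	if (error == 0)
 *		error = uvm_map_enter(map, &args, new_entry);
 */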

int	uvm_mapent_trymerge(struct vm_map *,
	    struct vm_map_entry *, int);
#define	UVM_MERGE_COPYING	1

bool	vm_map_starved_p(struct vm_map *);

/*
 * VM map locking operations.
 */

bool	vm_map_lock_try(struct vm_map *);
void	vm_map_lock(struct vm_map *);
void	vm_map_unlock(struct vm_map *);
void	vm_map_unbusy(struct vm_map *);
void	vm_map_lock_read(struct vm_map *);
void	vm_map_unlock_read(struct vm_map *);
void	vm_map_busy(struct vm_map *);
bool	vm_map_locked_p(struct vm_map *);

void	uvm_map_lock_entry(struct vm_map_entry *);
void	uvm_map_unlock_entry(struct vm_map_entry *);
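
/*
 * Illustrative sketch only of the `busy' protocol described earlier,
 * using just the functions declared above: a thread that must release
 * the map around a blocking operation can mark it busy so writers stay
 * out until it finishes.  Real callers in the kernel may differ in
 * detail, and the blocking operation itself is a placeholder.
 *
 *	vm_map_lock(map);	(write-lock the map)
 *	vm_map_busy(map);	(mark it busy while write-locked)
 *	vm_map_unlock(map);	(release the lock while we sleep)
 *
 *	... perform the blocking operation ...
 *
 *	vm_map_lock(map);	(re-acquire the write lock)
 *	vm_map_unbusy(map);	(clear busy; same thread that set it)
 *	vm_map_unlock(map);
 */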

#endif /* _KERNEL */

/*
 * Functions implemented as macros
 */
#define	vm_map_min(map)		((map)->header.end)
#define	vm_map_max(map)		((map)->header.start)
#define	vm_map_setmin(map, v)	((map)->header.end = (v))
#define	vm_map_setmax(map, v)	((map)->header.start = (v))

#define	vm_map_pmap(map)	((map)->pmap)

#endif /* _UVM_UVM_MAP_H_ */