/*	$NetBSD: subr_workqueue.c,v 1.33 2012/10/07 22:16:21 matt Exp $	*/

/*-
 * Copyright (c)2002, 2005, 2006, 2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_workqueue.c,v 1.33 2012/10/07 22:16:21 matt Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/systm.h>
#include <sys/kthread.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/workqueue.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/queue.h>

typedef struct work_impl {
	SIMPLEQ_ENTRY(work_impl) wk_entry;
} work_impl_t;

SIMPLEQ_HEAD(workqhead, work_impl);

struct workqueue_queue {
	kmutex_t q_mutex;
	kcondvar_t q_cv;
	struct workqhead q_queue;
	lwp_t *q_worker;
};

struct workqueue {
	void (*wq_func)(struct work *, void *);
	void *wq_arg;
	int wq_flags;

	char wq_name[MAXCOMLEN];
	pri_t wq_prio;
	void *wq_ptr;
};

#define	WQ_SIZE		(roundup2(sizeof(struct workqueue), coherency_unit))
#define	WQ_QUEUE_SIZE	(roundup2(sizeof(struct workqueue_queue), coherency_unit))

#define	POISON		0xaabbccdd
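
/*
 * memory layout of a workqueue (one contiguous kmem allocation):
 *
 *	[pad to coherency_unit][struct workqueue][queue 0][queue 1]...
 *
 * each per-CPU queue is padded to coherency_unit to avoid false
 * sharing between worker threads running on different CPUs; the
 * extra coherency_unit accounts for aligning the base pointer.
 */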
static size_t
workqueue_size(int flags)
{

	return WQ_SIZE
	    + ((flags & WQ_PERCPU) != 0 ? ncpu : 1) * WQ_QUEUE_SIZE
	    + coherency_unit;
}
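
/*
 * workqueue_queue_lookup: map a CPU to its queue.  for a WQ_PERCPU
 * workqueue, return the queue at cpu_index(ci) (curcpu() if ci is
 * NULL); otherwise there is only queue 0.
 */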
static struct workqueue_queue *
workqueue_queue_lookup(struct workqueue *wq, struct cpu_info *ci)
{
	u_int idx = 0;

	if (wq->wq_flags & WQ_PERCPU) {
		idx = ci ? cpu_index(ci) : cpu_index(curcpu());
	}

	return (void *)((uintptr_t)(wq) + WQ_SIZE + (idx * WQ_QUEUE_SIZE));
}
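
/*
 * workqueue_runlist: run every work item on "list" in order, with
 * no locks held; the worker has already detached the list from the
 * queue.
 */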
static void
workqueue_runlist(struct workqueue *wq, struct workqhead *list)
{
	work_impl_t *wk;
	work_impl_t *next;

	/*
	 * note that "list" is not a complete SIMPLEQ:
	 * only sqh_first is valid, as the worker grabbed the queue
	 * contents without fixing up sqh_last.
	 */

	for (wk = SIMPLEQ_FIRST(list); wk != NULL; wk = next) {
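		/* fetch the next entry first; the callback may free wk */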
		next = SIMPLEQ_NEXT(wk, wk_entry);
		(*wq->wq_func)((void *)wk, wq->wq_arg);
	}
}
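
/*
 * workqueue_worker: the body of each worker kthread.  sleep until
 * work arrives, detach the whole pending list under the queue lock,
 * then run the items with the lock dropped.
 */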
static void
workqueue_worker(void *cookie)
{
	struct workqueue *wq = cookie;
	struct workqueue_queue *q;

	/* find the workqueue of this kthread */
	q = workqueue_queue_lookup(wq, curlwp->l_cpu);

	for (;;) {
		struct workqhead tmp;

		/*
		 * we violate the SIMPLEQ abstraction here: the queue
		 * contents are grabbed in bulk by copying sqh_first,
		 * so tmp.sqh_last is never made valid.
		 */

#if defined(DIAGNOSTIC)
		tmp.sqh_last = (void *)POISON;
#endif /* defined(DIAGNOSTIC) */

		mutex_enter(&q->q_mutex);
		while (SIMPLEQ_EMPTY(&q->q_queue))
			cv_wait(&q->q_cv, &q->q_mutex);
		tmp.sqh_first = q->q_queue.sqh_first;	/* XXX */
		SIMPLEQ_INIT(&q->q_queue);
		mutex_exit(&q->q_mutex);

		workqueue_runlist(wq, &tmp);
	}
}
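
/*
 * workqueue_init: fill in the software state of the workqueue
 * itself.  the per-queue locks are set up later in
 * workqueue_initqueue, which is why "ipl" is unused here.
 * note that the name is truncated to MAXCOMLEN bytes and is not
 * NUL-terminated if it is at least that long.
 */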
static void
workqueue_init(struct workqueue *wq, const char *name,
    void (*callback_func)(struct work *, void *), void *callback_arg,
    pri_t prio, int ipl)
{

	strncpy(wq->wq_name, name, sizeof(wq->wq_name));

	wq->wq_prio = prio;
	wq->wq_func = callback_func;
	wq->wq_arg = callback_arg;
}
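
/*
 * workqueue_initqueue: initialize one queue and create its worker
 * kthread, bound to "ci" if that is non-NULL.  workers at priorities
 * below PRI_KERNEL are created as timeshared threads (KTHREAD_TS).
 */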
static int
workqueue_initqueue(struct workqueue *wq, struct workqueue_queue *q,
    int ipl, struct cpu_info *ci)
{
	int error, ktf;

	KASSERT(q->q_worker == NULL);

	mutex_init(&q->q_mutex, MUTEX_DEFAULT, ipl);
	cv_init(&q->q_cv, wq->wq_name);
	SIMPLEQ_INIT(&q->q_queue);
	ktf = ((wq->wq_flags & WQ_MPSAFE) != 0 ? KTHREAD_MPSAFE : 0);
	if (wq->wq_prio < PRI_KERNEL)
		ktf |= KTHREAD_TS;
	if (ci) {
		error = kthread_create(wq->wq_prio, ktf, ci, workqueue_worker,
		    wq, &q->q_worker, "%s/%u", wq->wq_name, ci->ci_index);
	} else {
		error = kthread_create(wq->wq_prio, ktf, ci, workqueue_worker,
		    wq, &q->q_worker, "%s", wq->wq_name);
	}
	if (error != 0) {
		mutex_destroy(&q->q_mutex);
		cv_destroy(&q->q_cv);
		KASSERT(q->q_worker == NULL);
	}
	return error;
}
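
/*
 * workqueue teardown protocol: workqueue_destroy swaps wq_func to
 * workqueue_exit and feeds each worker a workqueue_exitargs as its
 * final work item.  the worker clears q_worker, signals q_cv and
 * exits; workqueue_finiqueue waits for that handshake before it
 * destroys the queue's lock and condvar.
 */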
struct workqueue_exitargs {
	work_impl_t wqe_wk;
	struct workqueue_queue *wqe_q;
};

static void
workqueue_exit(struct work *wk, void *arg)
{
	struct workqueue_exitargs *wqe = (void *)wk;
	struct workqueue_queue *q = wqe->wqe_q;

	/*
	 * only competition at this point is workqueue_finiqueue.
	 */

	KASSERT(q->q_worker == curlwp);
	KASSERT(SIMPLEQ_EMPTY(&q->q_queue));
	mutex_enter(&q->q_mutex);
	q->q_worker = NULL;
	cv_signal(&q->q_cv);
	mutex_exit(&q->q_mutex);
	kthread_exit(0);
}

static void
workqueue_finiqueue(struct workqueue *wq, struct workqueue_queue *q)
{
	struct workqueue_exitargs wqe;

	KASSERT(wq->wq_func == workqueue_exit);

	wqe.wqe_q = q;
	KASSERT(SIMPLEQ_EMPTY(&q->q_queue));
	KASSERT(q->q_worker != NULL);
	mutex_enter(&q->q_mutex);
	SIMPLEQ_INSERT_TAIL(&q->q_queue, &wqe.wqe_wk, wk_entry);
	cv_signal(&q->q_cv);
	while (q->q_worker != NULL) {
		cv_wait(&q->q_cv, &q->q_mutex);
	}
	mutex_exit(&q->q_mutex);
	mutex_destroy(&q->q_mutex);
	cv_destroy(&q->q_cv);
}

/* --- */
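
/*
 * typical usage of the public interface (a minimal sketch; the
 * "frob" names are only illustrative and not part of this file):
 *
 *	struct frob {
 *		struct work f_work;	(kept first, so the callback
 *					 can cast the pointer back)
 *		...
 *	};
 *
 *	static struct workqueue *frob_wq;
 *
 *	static void
 *	frob_work(struct work *wk, void *arg)
 *	{
 *		struct frob *f = (void *)wk;
 *
 *		... process one item ...
 *	}
 *
 *	error = workqueue_create(&frob_wq, "frob", frob_work, NULL,
 *	    PRI_NONE, IPL_NONE, WQ_MPSAFE);
 *	...
 *	workqueue_enqueue(frob_wq, &f->f_work, NULL);
 */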
int
workqueue_create(struct workqueue **wqp, const char *name,
    void (*callback_func)(struct work *, void *), void *callback_arg,
    pri_t prio, int ipl, int flags)
{
	struct workqueue *wq;
	struct workqueue_queue *q;
	void *ptr;
	int error = 0;

	CTASSERT(sizeof(work_impl_t) <= sizeof(struct work));

	ptr = kmem_zalloc(workqueue_size(flags), KM_SLEEP);
	wq = (void *)roundup2((uintptr_t)ptr, coherency_unit);
	wq->wq_ptr = ptr;
	wq->wq_flags = flags;

	workqueue_init(wq, name, callback_func, callback_arg, prio, ipl);

	if (flags & WQ_PERCPU) {
		struct cpu_info *ci;
		CPU_INFO_ITERATOR cii;

		/* create the work-queue for each CPU */
		for (CPU_INFO_FOREACH(cii, ci)) {
			q = workqueue_queue_lookup(wq, ci);
			error = workqueue_initqueue(wq, q, ipl, ci);
			if (error) {
				break;
			}
		}
	} else {
		/* initialize a work-queue */
		q = workqueue_queue_lookup(wq, NULL);
		error = workqueue_initqueue(wq, q, ipl, NULL);
	}

	if (error != 0) {
		workqueue_destroy(wq);
	} else {
		*wqp = wq;
	}

	return error;
}
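
/*
 * workqueue_destroy: tear down a workqueue.  the caller must
 * guarantee that no work remains queued and that nothing will
 * enqueue more; each live worker is shut down via the
 * workqueue_exit handshake before the memory is freed.
 */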
void
workqueue_destroy(struct workqueue *wq)
{
	struct workqueue_queue *q;
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	wq->wq_func = workqueue_exit;
	for (CPU_INFO_FOREACH(cii, ci)) {
		q = workqueue_queue_lookup(wq, ci);
		if (q->q_worker != NULL) {
			workqueue_finiqueue(wq, q);
		}
	}
	kmem_free(wq->wq_ptr, workqueue_size(wq->wq_flags));
}
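
/*
 * workqueue_enqueue: queue one work item and wake the worker.
 * "ci" selects the target CPU's queue and may only be non-NULL for
 * WQ_PERCPU workqueues; the caller must not enqueue a work item
 * that is already on a queue.
 */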
void
workqueue_enqueue(struct workqueue *wq, struct work *wk0, struct cpu_info *ci)
{
	struct workqueue_queue *q;
	work_impl_t *wk = (void *)wk0;

	KASSERT(wq->wq_flags & WQ_PERCPU || ci == NULL);
	q = workqueue_queue_lookup(wq, ci);

	mutex_enter(&q->q_mutex);
	SIMPLEQ_INSERT_TAIL(&q->q_queue, wk, wk_entry);
	cv_signal(&q->q_cv);
	mutex_exit(&q->q_mutex);
}