| 1 | /* $NetBSD: rf_states.c,v 1.50 2016/01/03 08:17:24 mlelstv Exp $ */ |
| 2 | /* |
| 3 | * Copyright (c) 1995 Carnegie-Mellon University. |
| 4 | * All rights reserved. |
| 5 | * |
| 6 | * Author: Mark Holland, William V. Courtright II, Robby Findler |
| 7 | * |
| 8 | * Permission to use, copy, modify and distribute this software and |
| 9 | * its documentation is hereby granted, provided that both the copyright |
| 10 | * notice and this permission notice appear in all copies of the |
| 11 | * software, derivative works or modified versions, and any portions |
| 12 | * thereof, and that both notices appear in supporting documentation. |
| 13 | * |
| 14 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" |
| 15 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND |
| 16 | * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. |
| 17 | * |
| 18 | * Carnegie Mellon requests users of this software to return to |
| 19 | * |
| 20 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU |
| 21 | * School of Computer Science |
| 22 | * Carnegie Mellon University |
| 23 | * Pittsburgh PA 15213-3890 |
| 24 | * |
| 25 | * any improvements or extensions that they make and grant Carnegie the |
| 26 | * rights to redistribute these changes. |
| 27 | */ |
| 28 | |
| 29 | #include <sys/cdefs.h> |
| 30 | __KERNEL_RCSID(0, "$NetBSD: rf_states.c,v 1.50 2016/01/03 08:17:24 mlelstv Exp $" ); |
| 31 | |
| 32 | #include <sys/errno.h> |
| 33 | |
| 34 | #include "rf_archs.h" |
| 35 | #include "rf_threadstuff.h" |
| 36 | #include "rf_raid.h" |
| 37 | #include "rf_dag.h" |
| 38 | #include "rf_desc.h" |
| 39 | #include "rf_aselect.h" |
| 40 | #include "rf_general.h" |
| 41 | #include "rf_states.h" |
| 42 | #include "rf_dagutils.h" |
| 43 | #include "rf_driver.h" |
| 44 | #include "rf_engine.h" |
| 45 | #include "rf_map.h" |
| 46 | #include "rf_etimer.h" |
| 47 | #include "rf_kintf.h" |
| 48 | #include "rf_paritymap.h" |
| 49 | |
| 50 | #ifndef RF_DEBUG_STATES |
| 51 | #define RF_DEBUG_STATES 0 |
| 52 | #endif |
| 53 | |
| 54 | /* prototypes for some of the available states. |
| 55 | |
| 56 | States must: |
| 57 | |
| 58 | - not block. |
| 59 | |
| 60 | - either schedule rf_ContinueRaidAccess as a callback and return |
| 61 | RF_TRUE, or complete all of their work and return RF_FALSE. |
| 62 | |
| 63 | - increment desc->state when they have finished their work. |
| 64 | */ |
| 65 | |
#if RF_DEBUG_STATES
/*
 * Map an access-state code to a human-readable name for the debug
 * printfs in rf_ContinueRaidAccess().  Unknown values get a loud
 * placeholder rather than a NULL pointer.
 */
static char *
StateName(RF_AccessState_t state)
{
	switch (state) {
	case rf_QuiesceState:
		return "QuiesceState";
	case rf_IncrAccessesCountState:
		return "IncrAccessesCountState";
	case rf_MapState:
		return "MapState";
	case rf_LockState:
		return "LockState";
	case rf_CreateDAGState:
		return "CreateDAGState";
	case rf_ExecuteDAGState:
		return "ExecuteDAGState";
	case rf_ProcessDAGState:
		return "ProcessDAGState";
	case rf_CleanupState:
		return "CleanupState";
	case rf_DecrAccessesCountState:
		return "DecrAccessesCountState";
	case rf_LastState:
		return "LastState";
	default:
		return "!!! UnnamedState !!!";
	}
}
#endif
| 95 | |
/*
 * Drive an access descriptor through its state array.  Each state
 * handler either completes its work synchronously (advancing
 * desc->state and returning RF_FALSE) or schedules
 * rf_ContinueRaidAccess as a callback and returns RF_TRUE, in which
 * case the loop stops until the callback re-enters this function.
 */
void
rf_ContinueRaidAccess(RF_RaidAccessDesc_t *desc)
{
	int suspended = RF_FALSE;
	int current_state_index = desc->state;
	RF_AccessState_t current_state = desc->states[current_state_index];
#if RF_DEBUG_STATES
	/* cache the unit number now; desc may be freed by LastState below */
	int unit = desc->raidPtr->raidid;
#endif

	do {

		/* re-read each iteration: handlers advance desc->state */
		current_state_index = desc->state;
		current_state = desc->states[current_state_index];

		switch (current_state) {

		case rf_QuiesceState:
			suspended = rf_State_Quiesce(desc);
			break;
		case rf_IncrAccessesCountState:
			suspended = rf_State_IncrAccessCount(desc);
			break;
		case rf_MapState:
			suspended = rf_State_Map(desc);
			break;
		case rf_LockState:
			suspended = rf_State_Lock(desc);
			break;
		case rf_CreateDAGState:
			suspended = rf_State_CreateDAG(desc);
			break;
		case rf_ExecuteDAGState:
			suspended = rf_State_ExecuteDAG(desc);
			break;
		case rf_ProcessDAGState:
			suspended = rf_State_ProcessDAG(desc);
			break;
		case rf_CleanupState:
			suspended = rf_State_Cleanup(desc);
			break;
		case rf_DecrAccessesCountState:
			suspended = rf_State_DecrAccessCount(desc);
			break;
		case rf_LastState:
			suspended = rf_State_LastState(desc);
			break;
		}

		/* after this point, we cannot dereference desc since
		 * desc may have been freed. desc is only freed in
		 * LastState, so if we re-enter this function or loop
		 * back up, desc should be valid. */

#if RF_DEBUG_STATES
		if (rf_printStatesDebug) {
			printf("raid%d: State: %-24s StateIndex: %3i desc: 0x%ld %s\n",
			       unit, StateName(current_state),
			       current_state_index, (long) desc,
			       suspended ? "callback scheduled" : "looping");
		}
#endif
	} while (!suspended && current_state != rf_LastState);

	return;
}
| 162 | |
| 163 | |
/*
 * DAG-completion callback: invoked by the engine when one dag in
 * dagList finishes executing.  Records execution timing, checks the
 * just-finished dag for failure, counts it as done, and re-enters the
 * access state machine (rf_State_ProcessDAG will fire the next dag or
 * finish the access).
 */
void
rf_ContinueDagAccess(RF_DagList_t *dagList)
{
#if RF_ACC_TRACE > 0
	RF_AccTraceEntry_t *tracerec = &(dagList->desc->tracerec);
	RF_Etimer_t timer;
#endif
	RF_RaidAccessDesc_t *desc;
	RF_DagHeader_t *dag_h;
	int i;

	desc = dagList->desc;

#if RF_ACC_TRACE > 0
	/* stop the exec timer and restart it for the next phase */
	timer = tracerec->timer;
	RF_ETIMER_STOP(timer);
	RF_ETIMER_EVAL(timer);
	tracerec->specific.user.exec_us = RF_ETIMER_VAL_US(timer);
	RF_ETIMER_START(tracerec->timer);
#endif

	/* skip to dag which just finished */
	dag_h = dagList->dags;
	for (i = 0; i < dagList->numDagsDone; i++) {
		dag_h = dag_h->next;
	}

	/* check to see if retry is required */
	if (dag_h->status == rf_rollBackward) {
		/* when a dag fails, mark desc status as bad and allow
		 * all other dags in the desc to execute to
		 * completion. then, free all dags and start over */
		desc->status = 1;	/* bad status */
#if 0
		printf("raid%d: DAG failure: %c addr 0x%lx "
		       "(%ld) nblk 0x%x (%d) buf 0x%lx state %d\n",
		       desc->raidPtr->raidid, desc->type,
		       (long) desc->raidAddress,
		       (long) desc->raidAddress, (int) desc->numBlocks,
		       (int) desc->numBlocks,
		       (unsigned long) (desc->bufPtr), desc->state);
#endif
	}
	/* count the dag done before re-entering the state machine so
	 * rf_State_ProcessDAG sees an up-to-date completion count */
	dagList->numDagsDone++;
	rf_ContinueRaidAccess(desc);
}
| 210 | |
/*
 * Final state: complete the I/O back to the kernel (raiddone), run the
 * user callback if any, and free the descriptor.  desc must not be
 * touched after rf_FreeRaidAccDesc() returns.
 */
int
rf_State_LastState(RF_RaidAccessDesc_t *desc)
{
	/* capture the callback and its argument before desc is freed */
	void (*callbackFunc) (RF_CBParam_t) = desc->callbackFunc;
	RF_CBParam_t callbackArg;

	callbackArg.p = desc->callbackArg;

	/*
	 * We don't support non-async IO.
	 */
	KASSERT(desc->async_flag);

	/*
	 * The parity_map hook has to go here, because the iodone
	 * callback goes straight into the kintf layer.
	 */
	if (desc->raidPtr->parity_map != NULL &&
	    desc->type == RF_IO_TYPE_WRITE)
		rf_paritymap_end(desc->raidPtr->parity_map,
		    desc->raidAddress, desc->numBlocks);

	/* printf("Calling raiddone on 0x%x\n",desc->bp); */
	raiddone(desc->raidPtr, desc->bp);	/* access came through ioctl */

	if (callbackFunc)
		callbackFunc(callbackArg);
	rf_FreeRaidAccDesc(desc);

	return RF_FALSE;
}
| 242 | |
| 243 | int |
| 244 | rf_State_IncrAccessCount(RF_RaidAccessDesc_t *desc) |
| 245 | { |
| 246 | RF_Raid_t *raidPtr; |
| 247 | |
| 248 | raidPtr = desc->raidPtr; |
| 249 | /* Bummer. We have to do this to be 100% safe w.r.t. the increment |
| 250 | * below */ |
| 251 | rf_lock_mutex2(raidPtr->access_suspend_mutex); |
| 252 | raidPtr->accs_in_flight++; /* used to detect quiescence */ |
| 253 | rf_unlock_mutex2(raidPtr->access_suspend_mutex); |
| 254 | |
| 255 | desc->state++; |
| 256 | return RF_FALSE; |
| 257 | } |
| 258 | |
| 259 | int |
| 260 | rf_State_DecrAccessCount(RF_RaidAccessDesc_t *desc) |
| 261 | { |
| 262 | RF_Raid_t *raidPtr; |
| 263 | |
| 264 | raidPtr = desc->raidPtr; |
| 265 | |
| 266 | rf_lock_mutex2(raidPtr->access_suspend_mutex); |
| 267 | raidPtr->accs_in_flight--; |
| 268 | if (raidPtr->accesses_suspended && raidPtr->accs_in_flight == 0) { |
| 269 | rf_SignalQuiescenceLock(raidPtr); |
| 270 | } |
| 271 | rf_unlock_mutex2(raidPtr->access_suspend_mutex); |
| 272 | |
| 273 | desc->state++; |
| 274 | return RF_FALSE; |
| 275 | } |
| 276 | |
| 277 | int |
| 278 | rf_State_Quiesce(RF_RaidAccessDesc_t *desc) |
| 279 | { |
| 280 | #if RF_ACC_TRACE > 0 |
| 281 | RF_AccTraceEntry_t *tracerec = &desc->tracerec; |
| 282 | RF_Etimer_t timer; |
| 283 | #endif |
| 284 | RF_CallbackDesc_t *cb; |
| 285 | RF_Raid_t *raidPtr; |
| 286 | int suspended = RF_FALSE; |
| 287 | int need_cb, used_cb; |
| 288 | |
| 289 | raidPtr = desc->raidPtr; |
| 290 | |
| 291 | #if RF_ACC_TRACE > 0 |
| 292 | RF_ETIMER_START(timer); |
| 293 | RF_ETIMER_START(desc->timer); |
| 294 | #endif |
| 295 | |
| 296 | need_cb = 0; |
| 297 | used_cb = 0; |
| 298 | cb = NULL; |
| 299 | |
| 300 | rf_lock_mutex2(raidPtr->access_suspend_mutex); |
| 301 | /* Do an initial check to see if we might need a callback structure */ |
| 302 | if (raidPtr->accesses_suspended) { |
| 303 | need_cb = 1; |
| 304 | } |
| 305 | rf_unlock_mutex2(raidPtr->access_suspend_mutex); |
| 306 | |
| 307 | if (need_cb) { |
| 308 | /* create a callback if we might need it... |
| 309 | and we likely do. */ |
| 310 | cb = rf_AllocCallbackDesc(); |
| 311 | } |
| 312 | |
| 313 | rf_lock_mutex2(raidPtr->access_suspend_mutex); |
| 314 | if (raidPtr->accesses_suspended) { |
| 315 | cb->callbackFunc = (void (*) (RF_CBParam_t)) rf_ContinueRaidAccess; |
| 316 | cb->callbackArg.p = (void *) desc; |
| 317 | cb->next = raidPtr->quiesce_wait_list; |
| 318 | raidPtr->quiesce_wait_list = cb; |
| 319 | suspended = RF_TRUE; |
| 320 | used_cb = 1; |
| 321 | } |
| 322 | rf_unlock_mutex2(raidPtr->access_suspend_mutex); |
| 323 | |
| 324 | if ((need_cb == 1) && (used_cb == 0)) { |
| 325 | rf_FreeCallbackDesc(cb); |
| 326 | } |
| 327 | |
| 328 | #if RF_ACC_TRACE > 0 |
| 329 | RF_ETIMER_STOP(timer); |
| 330 | RF_ETIMER_EVAL(timer); |
| 331 | tracerec->specific.user.suspend_ovhd_us += RF_ETIMER_VAL_US(timer); |
| 332 | #endif |
| 333 | |
| 334 | #if RF_DEBUG_QUIESCE |
| 335 | if (suspended && rf_quiesceDebug) |
| 336 | printf("Stalling access due to quiescence lock\n" ); |
| 337 | #endif |
| 338 | desc->state++; |
| 339 | return suspended; |
| 340 | } |
| 341 | |
| 342 | int |
| 343 | rf_State_Map(RF_RaidAccessDesc_t *desc) |
| 344 | { |
| 345 | RF_Raid_t *raidPtr = desc->raidPtr; |
| 346 | #if RF_ACC_TRACE > 0 |
| 347 | RF_AccTraceEntry_t *tracerec = &desc->tracerec; |
| 348 | RF_Etimer_t timer; |
| 349 | |
| 350 | RF_ETIMER_START(timer); |
| 351 | #endif |
| 352 | |
| 353 | if (!(desc->asmap = rf_MapAccess(raidPtr, desc->raidAddress, desc->numBlocks, |
| 354 | desc->bufPtr, RF_DONT_REMAP))) |
| 355 | RF_PANIC(); |
| 356 | |
| 357 | #if RF_ACC_TRACE > 0 |
| 358 | RF_ETIMER_STOP(timer); |
| 359 | RF_ETIMER_EVAL(timer); |
| 360 | tracerec->specific.user.map_us = RF_ETIMER_VAL_US(timer); |
| 361 | #endif |
| 362 | |
| 363 | desc->state++; |
| 364 | return RF_FALSE; |
| 365 | } |
| 366 | |
/*
 * Acquire the stripe locks this access needs, and for writes issued
 * while reconstruction is in progress, force or block reconstruction
 * of the affected stripes.  Returns RF_TRUE (suspended) when a lock or
 * recon request cannot be granted immediately; rf_ContinueRaidAccess
 * is then invoked as the grant callback and this state re-runs,
 * skipping work already done via the LOCK_TRIED/FORCE_TRIED flags.
 */
int
rf_State_Lock(RF_RaidAccessDesc_t *desc)
{
#if RF_ACC_TRACE > 0
	RF_AccTraceEntry_t *tracerec = &desc->tracerec;
	RF_Etimer_t timer;
#endif
	RF_Raid_t *raidPtr = desc->raidPtr;
	RF_AccessStripeMapHeader_t *asmh = desc->asmap;
	RF_AccessStripeMap_t *asm_p;
	RF_StripeNum_t lastStripeID = -1;
	int suspended = RF_FALSE;

#if RF_ACC_TRACE > 0
	RF_ETIMER_START(timer);
#endif

	/* acquire each lock that we don't already hold */
	for (asm_p = asmh->stripeMap; asm_p; asm_p = asm_p->next) {
		RF_ASSERT(RF_IO_IS_R_OR_W(desc->type));
		if (!rf_suppressLocksAndLargeWrites &&
		    asm_p->parityInfo &&
		    !(desc->flags & RF_DAG_SUPPRESS_LOCKS) &&
		    !(asm_p->flags & RF_ASM_FLAGS_LOCK_TRIED)) {
			/* mark first so a re-entry doesn't re-request this lock */
			asm_p->flags |= RF_ASM_FLAGS_LOCK_TRIED;
			/* locks must be acquired hierarchically */
			RF_ASSERT(asm_p->stripeID > lastStripeID);
			lastStripeID = asm_p->stripeID;

			RF_INIT_LOCK_REQ_DESC(asm_p->lockReqDesc, desc->type,
			    (void (*) (struct buf *)) rf_ContinueRaidAccess, desc, asm_p,
			    raidPtr->Layout.dataSectorsPerStripe);
			if (rf_AcquireStripeLock(raidPtr->lockTable, asm_p->stripeID,
				&asm_p->lockReqDesc)) {
				/* lock not granted; we'll be called back */
				suspended = RF_TRUE;
				break;
			}
		}
		if (desc->type == RF_IO_TYPE_WRITE &&
		    raidPtr->status == rf_rs_reconstructing) {
			if (!(asm_p->flags & RF_ASM_FLAGS_FORCE_TRIED)) {
				int val;

				asm_p->flags |= RF_ASM_FLAGS_FORCE_TRIED;
				val = rf_ForceOrBlockRecon(raidPtr, asm_p,
				    (void (*) (RF_Raid_t *, void *)) rf_ContinueRaidAccess, desc);
				if (val == 0) {
					/* recon blocked now; remember to unblock in Cleanup */
					asm_p->flags |= RF_ASM_FLAGS_RECON_BLOCKED;
				} else {
					/* must wait; we'll be called back */
					suspended = RF_TRUE;
					break;
				}
			} else {
#if RF_DEBUG_PSS > 0
				if (rf_pssDebug) {
					printf("raid%d: skipping force/block because already done, psid %ld\n",
					       desc->raidPtr->raidid,
					       (long) asm_p->stripeID);
				}
#endif
			}
		} else {
#if RF_DEBUG_PSS > 0
			if (rf_pssDebug) {
				printf("raid%d: skipping force/block because not write or not under recon, psid %ld\n",
				       desc->raidPtr->raidid,
				       (long) asm_p->stripeID);
			}
#endif
		}
	}
#if RF_ACC_TRACE > 0
	RF_ETIMER_STOP(timer);
	RF_ETIMER_EVAL(timer);
	tracerec->specific.user.lock_us += RF_ETIMER_VAL_US(timer);
#endif
	/* suspended: do NOT advance desc->state, so this state re-runs
	 * when the callback fires */
	if (suspended)
		return (RF_TRUE);

	desc->state++;
	return (RF_FALSE);
}
| 449 | /* |
| 450 | * the following three states create, execute, and post-process dags |
| 451 | * the error recovery unit is a single dag. |
| 452 | * by default, SelectAlgorithm creates an array of dags, one per parity stripe |
| 453 | * in some tricky cases, multiple dags per stripe are created |
| 454 | * - dags within a parity stripe are executed sequentially (arbitrary order) |
| 455 | * - dags for distinct parity stripes are executed concurrently |
| 456 | * |
| 457 | * repeat until all dags complete successfully -or- dag selection fails |
| 458 | * |
| 459 | * while !done |
| 460 | * create dag(s) (SelectAlgorithm) |
| 461 | * if dag |
| 462 | * execute dag (DispatchDAG) |
| 463 | * if dag successful |
| 464 | * done (SUCCESS) |
| 465 | * else |
| 466 | * !done (RETRY - start over with new dags) |
| 467 | * else |
| 468 | * done (FAIL) |
| 469 | */ |
/*
 * Create the dag(s) for this access via rf_SelectAlgorithm().  On
 * success, bind the dags to the descriptor and advance to
 * rf_State_ExecuteDAG.  On failure (too many faults, or the access has
 * already been retried RF_RETRY_THRESHOLD times), fail the buffer with
 * EIO and jump straight to rf_State_Cleanup.
 */
int
rf_State_CreateDAG(RF_RaidAccessDesc_t *desc)
{
#if RF_ACC_TRACE > 0
	RF_AccTraceEntry_t *tracerec = &desc->tracerec;
	RF_Etimer_t timer;
#endif
	RF_DagHeader_t *dag_h;
	RF_DagList_t *dagList;
	struct buf *bp;
	int i, selectStatus;

	/* generate a dag for the access, and fire it off.  When the dag
	 * completes, we'll get re-invoked in the next state. */
#if RF_ACC_TRACE > 0
	RF_ETIMER_START(timer);
#endif
	/* SelectAlgorithm returns one or more dags */
	selectStatus = rf_SelectAlgorithm(desc, desc->flags | RF_DAG_SUPPRESS_LOCKS);
#if RF_DEBUG_VALIDATE_DAG
	if (rf_printDAGsDebug) {
		dagList = desc->dagList;
		for (i = 0; i < desc->numStripes; i++) {
			rf_PrintDAGList(dagList->dags);
			dagList = dagList->next;
		}
	}
#endif				/* RF_DEBUG_VALIDATE_DAG */
#if RF_ACC_TRACE > 0
	RF_ETIMER_STOP(timer);
	RF_ETIMER_EVAL(timer);
	/* update time to create all dags */
	tracerec->specific.user.dag_create_us = RF_ETIMER_VAL_US(timer);
#endif

	desc->status = 0;	/* good status */

	if (selectStatus || (desc->numRetries > RF_RETRY_THRESHOLD)) {
		/* failed to create a dag */
		/* this happens when there are too many faults or incomplete
		 * dag libraries */
		if (selectStatus) {
			printf("raid%d: failed to create a dag. "
			       "Too many component failures.\n",
			       desc->raidPtr->raidid);
		} else {
			printf("raid%d: IO failed after %d retries.\n",
			       desc->raidPtr->raidid, RF_RETRY_THRESHOLD);
		}

		desc->status = 1;	/* bad status */
		/* skip straight to rf_State_Cleanup() */
		desc->state = rf_CleanupState;
		/* fail the I/O back to the caller: whole transfer residual */
		bp = (struct buf *)desc->bp;
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
	} else {
		/* bind dags to desc */
		dagList = desc->dagList;
		for (i = 0; i < desc->numStripes; i++) {
			dag_h = dagList->dags;
			while (dag_h) {
				dag_h->bp = (struct buf *) desc->bp;
#if RF_ACC_TRACE > 0
				dag_h->tracerec = tracerec;
#endif
				dag_h = dag_h->next;
			}
			dagList = dagList->next;
		}
		desc->flags |= RF_DAG_DISPATCH_RETURNED;
		desc->state++;	/* next state should be rf_State_ExecuteDAG */
	}
	return RF_FALSE;
}
| 545 | |
| 546 | |
| 547 | |
| 548 | /* the access has an list of dagLists, one dagList per parity stripe. |
| 549 | * fire the first dag in each parity stripe (dagList). |
| 550 | * dags within a stripe (dagList) must be executed sequentially |
| 551 | * - this preserves atomic parity update |
| 552 | * dags for independents parity groups (stripes) are fired concurrently */ |
| 553 | |
| 554 | int |
| 555 | rf_State_ExecuteDAG(RF_RaidAccessDesc_t *desc) |
| 556 | { |
| 557 | int i; |
| 558 | RF_DagHeader_t *dag_h; |
| 559 | RF_DagList_t *dagList; |
| 560 | |
| 561 | /* next state is always rf_State_ProcessDAG important to do |
| 562 | * this before firing the first dag (it may finish before we |
| 563 | * leave this routine) */ |
| 564 | desc->state++; |
| 565 | |
| 566 | /* sweep dag array, a stripe at a time, firing the first dag |
| 567 | * in each stripe */ |
| 568 | dagList = desc->dagList; |
| 569 | for (i = 0; i < desc->numStripes; i++) { |
| 570 | RF_ASSERT(dagList->numDags > 0); |
| 571 | RF_ASSERT(dagList->numDagsDone == 0); |
| 572 | RF_ASSERT(dagList->numDagsFired == 0); |
| 573 | #if RF_ACC_TRACE > 0 |
| 574 | RF_ETIMER_START(dagList->tracerec.timer); |
| 575 | #endif |
| 576 | /* fire first dag in this stripe */ |
| 577 | dag_h = dagList->dags; |
| 578 | RF_ASSERT(dag_h); |
| 579 | dagList->numDagsFired++; |
| 580 | rf_DispatchDAG(dag_h, (void (*) (void *)) rf_ContinueDagAccess, dagList); |
| 581 | dagList = dagList->next; |
| 582 | } |
| 583 | |
| 584 | /* the DAG will always call the callback, even if there was no |
| 585 | * blocking, so we are always suspended in this state */ |
| 586 | return RF_TRUE; |
| 587 | } |
| 588 | |
| 589 | |
| 590 | |
/* rf_State_ProcessDAG is entered when a dag completes.
 * first, check that all dags in the access have completed.
 * if not, fire as many dags as possible */

int
rf_State_ProcessDAG(RF_RaidAccessDesc_t *desc)
{
	RF_AccessStripeMapHeader_t *asmh = desc->asmap;
	RF_Raid_t *raidPtr = desc->raidPtr;
	RF_DagHeader_t *dag_h;
	int i, j, done = RF_TRUE;
	RF_DagList_t *dagList, *temp;

	/* check to see if this is the last dag */
	dagList = desc->dagList;
	for (i = 0; i < desc->numStripes; i++) {
		if (dagList->numDags != dagList->numDagsDone)
			done = RF_FALSE;
		dagList = dagList->next;
	}

	if (done) {
		if (desc->status) {
			/* a dag failed, retry */
			/* free all dags */
			dagList = desc->dagList;
			for (i = 0; i < desc->numStripes; i++) {
				rf_FreeDAG(dagList->dags);
				temp = dagList;
				dagList = dagList->next;
				rf_FreeDAGList(temp);
			}
			desc->dagList = NULL;

			/* record the failures so dag re-creation routes
			 * around the failed components */
			rf_MarkFailuresInASMList(raidPtr, asmh);

			/* note the retry so that we'll bail in
			   rf_State_CreateDAG() once we've retried
			   the IO RF_RETRY_THRESHOLD times */

			desc->numRetries++;

			/* back up to rf_State_CreateDAG */
			desc->state = desc->state - 2;
			return RF_FALSE;
		} else {
			/* move on to rf_State_Cleanup */
			desc->state++;
		}
		return RF_FALSE;
	} else {
		/* more dags to execute */
		/* see if any are ready to be fired. if so, fire them */
		/* don't fire the initial dag in a list, it's fired in
		 * rf_State_ExecuteDAG */
		dagList = desc->dagList;
		for (i = 0; i < desc->numStripes; i++) {
			/* fire only when this stripe still has dags
			 * pending and none is currently in flight
			 * (done == fired) */
			if ((dagList->numDagsDone < dagList->numDags)
			    && (dagList->numDagsDone == dagList->numDagsFired)
			    && (dagList->numDagsFired > 0)) {
#if RF_ACC_TRACE > 0
				RF_ETIMER_START(dagList->tracerec.timer);
#endif
				/* fire next dag in this stripe */
				/* first, skip to next dag awaiting execution */
				dag_h = dagList->dags;
				for (j = 0; j < dagList->numDagsDone; j++)
					dag_h = dag_h->next;
				dagList->numDagsFired++;
				rf_DispatchDAG(dag_h, (void (*) (void *)) rf_ContinueDagAccess,
					       dagList);
			}
			dagList = dagList->next;
		}
		/* dispatched dags will call back; stay suspended */
		return RF_TRUE;
	}
}
/* only make it this far if all dags complete successfully */
/*
 * Tear down the access: free the dags, release the stripe locks and
 * any reconstruction blocks taken in rf_State_Lock, free the access
 * stripe map, and (if tracing) finalize and log the trace record.
 */
int
rf_State_Cleanup(RF_RaidAccessDesc_t *desc)
{
#if RF_ACC_TRACE > 0
	RF_AccTraceEntry_t *tracerec = &desc->tracerec;
	RF_Etimer_t timer;
#endif
	RF_AccessStripeMapHeader_t *asmh = desc->asmap;
	RF_Raid_t *raidPtr = desc->raidPtr;
	RF_AccessStripeMap_t *asm_p;
	RF_DagList_t *dagList;
	int i;

	desc->state++;

#if RF_ACC_TRACE > 0
	timer = tracerec->timer;
	RF_ETIMER_STOP(timer);
	RF_ETIMER_EVAL(timer);
	tracerec->specific.user.dag_retry_us = RF_ETIMER_VAL_US(timer);

	/* the RAID I/O is complete. Clean up. */
	/* NOTE(review): dag_retry_us computed above is immediately
	 * discarded by this reset -- confirm which value is intended */
	tracerec->specific.user.dag_retry_us = 0;

	RF_ETIMER_START(timer);
#endif
	/* free all dags */
	dagList = desc->dagList;
	for (i = 0; i < desc->numStripes; i++) {
		rf_FreeDAG(dagList->dags);
		dagList = dagList->next;
	}
#if RF_ACC_TRACE > 0
	RF_ETIMER_STOP(timer);
	RF_ETIMER_EVAL(timer);
	tracerec->specific.user.cleanup_us = RF_ETIMER_VAL_US(timer);

	RF_ETIMER_START(timer);
#endif
	/* release stripe locks and recon blocks acquired in rf_State_Lock;
	 * the lock-suppression conditions must mirror the acquire path */
	for (asm_p = asmh->stripeMap; asm_p; asm_p = asm_p->next) {
		if (!rf_suppressLocksAndLargeWrites &&
		    asm_p->parityInfo &&
		    !(desc->flags & RF_DAG_SUPPRESS_LOCKS)) {
			RF_ASSERT_VALID_LOCKREQ(&asm_p->lockReqDesc);
			rf_ReleaseStripeLock(raidPtr->lockTable,
					     asm_p->stripeID,
					     &asm_p->lockReqDesc);
		}
		if (asm_p->flags & RF_ASM_FLAGS_RECON_BLOCKED) {
			rf_UnblockRecon(raidPtr, asm_p);
		}
	}
#if RF_ACC_TRACE > 0
	RF_ETIMER_STOP(timer);
	RF_ETIMER_EVAL(timer);
	tracerec->specific.user.lock_us += RF_ETIMER_VAL_US(timer);

	RF_ETIMER_START(timer);
#endif
	rf_FreeAccessStripeMap(asmh);
#if RF_ACC_TRACE > 0
	RF_ETIMER_STOP(timer);
	RF_ETIMER_EVAL(timer);
	tracerec->specific.user.cleanup_us += RF_ETIMER_VAL_US(timer);

	RF_ETIMER_STOP(desc->timer);
	RF_ETIMER_EVAL(desc->timer);

	timer = desc->tracerec.tot_timer;
	RF_ETIMER_STOP(timer);
	RF_ETIMER_EVAL(timer);
	desc->tracerec.total_us = RF_ETIMER_VAL_US(timer);

	rf_LogTraceRec(raidPtr, tracerec);
#endif
	desc->flags |= RF_DAG_ACCESS_COMPLETE;

	return RF_FALSE;
}
| 748 | |