# This is a BitKeeper generated patch for the following project:
# Project Name: Linux kernel tree
# This patch format is intended for GNU patch command version 2.5 or higher.
# This patch includes the following deltas:
#	           ChangeSet	1.457.7.5 -> 1.457.7.6
#	drivers/usb/host/ehci-q.c	1.20    -> 1.21   
#	drivers/usb/host/ehci-sched.c	1.15    -> 1.16   
#	drivers/usb/host/ehci-hcd.c	1.22    -> 1.23   
#	drivers/usb/host/ehci.h	1.5     -> 1.6    
#
# The following is the BitKeeper ChangeSet Log
# --------------------------------------------
# 02/08/06	david-b@pacbell.net	1.457.7.6
# [PATCH] ehci does interrupt queuing
# 
# This patch makes EHCI
# 
#   - Share the same TD queueing code for control, bulk,
#     and interrupt traffic;
#   - Queue interrupt transfers, modifying the code for
#     urb submit/unlink/complete;
#   - Thinner, by removing lots of nasty fatty special case
#     logic for interrupt transfers (size, no queueing, etc);
#   - Grow some "automagic resubmit" logic, ready to be
#     ripped out soonish (a driver-side sketch follows this log);
#   - Package its interrupt scheduling so it can be called
#     from more places.
# --------------------------------------------
#
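# The "automagic resubmit" above keeps interrupt URBs requeued from inside
# ehci_urb_done() only until device drivers resubmit from their own
# completion handlers.  A rough driver-side sketch (not part of this patch;
# my_intr_complete() is a hypothetical handler, and the unlink checks just
# mirror the statuses this patch tests before resubmitting) might look like:
#
#	static void my_intr_complete (struct urb *urb)
#	{
#		int	status;
#
#		/* don't resubmit an urb that was unlinked */
#		if (urb->status == -ENOENT || urb->status == -ECONNRESET)
#			return;
#
#		/* ... consume urb->transfer_buffer / urb->actual_length ... */
#
#		/* requeue; completion runs in interrupt context, no sleeping */
#		status = usb_submit_urb (urb, SLAB_ATOMIC);
#		if (status != 0)
#			err ("can't resubmit intr urb %p: %d", urb, status);
#	}
#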
diff -Nru a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
--- a/drivers/usb/host/ehci-hcd.c	Mon Aug 12 10:57:46 2002
+++ b/drivers/usb/host/ehci-hcd.c	Mon Aug 12 10:57:46 2002
@@ -65,6 +65,8 @@
  *
  * HISTORY:
  *
+ * 2002-08-06	Handling for bulk and interrupt transfers is mostly shared;
+ *	only scheduling is different, no arbitrary limitations.
  * 2002-07-25	Sanity check PCI reads, mostly for better cardbus support,
  * 	clean up HC run state handshaking.
  * 2002-05-24	Preliminary FS/LS interrupts, using scheduling shortcuts
@@ -85,7 +87,7 @@
  * 2001-June	Works with usb-storage and NEC EHCI on 2.4
  */
 
-#define DRIVER_VERSION "2002-Jul-25"
+#define DRIVER_VERSION "2002-Aug-06"
 #define DRIVER_AUTHOR "David Brownell"
 #define DRIVER_DESC "USB 2.0 'Enhanced' Host Controller (EHCI) Driver"
 
@@ -93,6 +95,8 @@
 // #define EHCI_VERBOSE_DEBUG
 // #define have_split_iso
 
+#define INTR_AUTOMAGIC		/* to be removed later in 2.5 */
+
 /* magic numbers that can affect system performance */
 #define	EHCI_TUNE_CERR		3	/* 0-3 qtd retries; 0 == don't stop */
 #define	EHCI_TUNE_RL_HS		0	/* nak throttle; see 4.9 */
@@ -618,7 +622,8 @@
  *
  * hcd-specific init for hcpriv hasn't been done yet
  *
- * NOTE:  EHCI queues control and bulk requests transparently, like OHCI.
+ * NOTE:  control, bulk, and interrupt share the same code to append TDs
+ * to a (possibly active) QH, and the same QH scanning code.
  */
 static int ehci_urb_enqueue (
 	struct usb_hcd	*hcd,
@@ -694,17 +699,35 @@
 		if (qh->qh_state == QH_STATE_LINKED)
 			start_unlink_async (ehci, qh);
 		spin_unlock_irqrestore (&ehci->lock, flags);
-		return 0;
+		break;
 
 	case PIPE_INTERRUPT:
-		intr_deschedule (ehci, urb->start_frame, qh,
-			(urb->dev->speed == USB_SPEED_HIGH)
-			    ? urb->interval
-			    : (urb->interval << 3));
-		if (ehci->hcd.state == USB_STATE_HALT)
-			urb->status = -ESHUTDOWN;
-		qh_completions (ehci, qh, 1);
-		return 0;
+		if (qh->qh_state == QH_STATE_LINKED) {
+			/* messy, can spin or block a microframe ... */
+			intr_deschedule (ehci, qh, 1);
+			/* qh_state == IDLE */
+		}
+		qh_completions (ehci, qh);
+
+		/* reschedule QH iff another request is queued */
+		if (!list_empty (&qh->qtd_list)
+				&& HCD_IS_RUNNING (ehci->hcd.state)) {
+			int status;
+
+			spin_lock_irqsave (&ehci->lock, flags);
+			status = qh_schedule (ehci, qh);
+			spin_unlock_irqrestore (&ehci->lock, flags);
+
+			if (status != 0) {
+				// shouldn't happen often, but ...
+				// FIXME kill those tds' urbs
+				err ("can't reschedule qh %p, err %d",
+					qh, status);
+			}
+			return status;
+		}
+
+		break;
 
 	case PIPE_ISOCHRONOUS:
 		// itd or sitd ...
diff -Nru a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
--- a/drivers/usb/host/ehci-q.c	Mon Aug 12 10:57:46 2002
+++ b/drivers/usb/host/ehci-q.c	Mon Aug 12 10:57:46 2002
@@ -159,30 +159,6 @@
 	}
 }
 
-static void ehci_urb_complete (
-	struct ehci_hcd		*ehci,
-	dma_addr_t		addr,
-	struct urb		*urb
-) {
-	if (urb->transfer_buffer_length && usb_pipein (urb->pipe))
-		pci_dma_sync_single (ehci->hcd.pdev, addr,
-			urb->transfer_buffer_length,
-			PCI_DMA_FROMDEVICE);
-
-	/* cleanse status if we saw no error */
-	if (likely (urb->status == -EINPROGRESS)) {
-		if (urb->actual_length != urb->transfer_buffer_length
-				&& (urb->transfer_flags & URB_SHORT_NOT_OK))
-			urb->status = -EREMOTEIO;
-		else
-			urb->status = 0;
-	}
-
-	/* only report unlinks once */
-	if (likely (urb->status != -ENOENT && urb->status != -ENOTCONN))
-		urb->complete (urb);
-}
-
 /* urb->lock ignored from here on (hcd is done with urb) */
 
 static void ehci_urb_done (
@@ -190,6 +166,11 @@
 	dma_addr_t		addr,
 	struct urb		*urb
 ) {
+#ifdef	INTR_AUTOMAGIC
+	struct urb		*resubmit = 0;
+	struct usb_device	*dev = 0;
+#endif
+
 	if (urb->transfer_buffer_length)
 		pci_unmap_single (ehci->hcd.pdev,
 			addr,
@@ -198,7 +179,23 @@
 			    ? PCI_DMA_FROMDEVICE
 			    : PCI_DMA_TODEVICE);
 	if (likely (urb->hcpriv != 0)) {
-		qh_put (ehci, (struct ehci_qh *) urb->hcpriv);
+		struct ehci_qh	*qh = (struct ehci_qh *) urb->hcpriv;
+
+		/* S-mask in a QH means it's an interrupt urb */
+		if ((qh->hw_info2 & cpu_to_le32 (0x00ff)) != 0) {
+
+			/* ... update hc-wide periodic stats (for usbfs) */
+			ehci->hcd.self.bandwidth_int_reqs--;
+
+#ifdef	INTR_AUTOMAGIC
+			if (!((urb->status == -ENOENT)
+					|| (urb->status == -ECONNRESET))) {
+				resubmit = usb_get_urb (urb);
+				dev = urb->dev;
+			}
+#endif
+		}
+		qh_put (ehci, qh);
 		urb->hcpriv = 0;
 	}
 
@@ -210,33 +207,46 @@
 			urb->status = 0;
 	}
 
-	/* hand off urb ownership */
 	usb_hcd_giveback_urb (&ehci->hcd, urb);
+
+#ifdef	INTR_AUTOMAGIC
+	if (resubmit && ((urb->status == -ENOENT)
+				|| (urb->status == -ECONNRESET))) {
+		usb_put_urb (resubmit);
+		resubmit = 0;
+	}
+	// device drivers will soon be doing something like this
+	if (resubmit) {
+		int	status;
+
+		resubmit->dev = dev;
+		status = usb_submit_urb (resubmit, SLAB_KERNEL);
+		if (status != 0)
+			err ("can't resubmit interrupt urb %p: status %d",
+					resubmit, status);
+		usb_put_urb (resubmit);
+	}
+#endif
 }
 
 
 /*
  * Process completed qtds for a qh, issuing completions if needed.
- * When freeing:  frees qtds, unmaps buf, returns URB to driver.
- * When not freeing (queued periodic qh):  retain qtds, mapping, and urb.
+ * Frees qtds, unmaps buf, returns URB to driver.
  * Races up to qh->hw_current; returns number of urb completions.
  */
-static int
-qh_completions (
-	struct ehci_hcd		*ehci,
-	struct ehci_qh		*qh,
-	int			freeing
-) {
+static void
+qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
 	struct ehci_qtd		*qtd, *last;
 	struct list_head	*next, *qtd_list = &qh->qtd_list;
 	int			unlink = 0, halted = 0;
 	unsigned long		flags;
-	int			retval = 0;
 
 	spin_lock_irqsave (&ehci->lock, flags);
 	if (unlikely (list_empty (qtd_list))) {
 		spin_unlock_irqrestore (&ehci->lock, flags);
-		return retval;
+		return;
 	}
 
 	/* scan QTDs till end of list, or we reach an active one */
@@ -253,14 +263,8 @@
 			if (likely (last->urb != urb)) {
 				/* complete() can reenter this HCD */
 				spin_unlock_irqrestore (&ehci->lock, flags);
-				if (likely (freeing != 0))
-					ehci_urb_done (ehci, last->buf_dma,
-						last->urb);
-				else
-					ehci_urb_complete (ehci, last->buf_dma,
-						last->urb);
+				ehci_urb_done (ehci, last->buf_dma, last->urb);
 				spin_lock_irqsave (&ehci->lock, flags);
-				retval++;
 			}
 
 			/* qh overlays can have HC's old cached copies of
@@ -272,8 +276,7 @@
 				qh->hw_qtd_next = last->hw_next;
 			}
 
-			if (likely (freeing != 0))
-				ehci_qtd_free (ehci, last);
+			ehci_qtd_free (ehci, last);
 			last = 0;
 		}
 		next = qtd->qtd_list.next;
@@ -290,7 +293,7 @@
 
 		/* fault: unlink the rest, since this qtd saw an error? */
 		if (unlikely ((token & QTD_STS_HALT) != 0)) {
-			freeing = unlink = 1;
+			unlink = 1;
 			/* status copied below */
 
 		/* QH halts only because of fault (above) or unlink (here). */
@@ -298,13 +301,14 @@
 
 			/* unlinking everything because of HC shutdown? */
 			if (ehci->hcd.state == USB_STATE_HALT) {
-				freeing = unlink = 1;
+				unlink = 1;
 
 			/* explicit unlink, maybe starting here? */
 			} else if (qh->qh_state == QH_STATE_IDLE
 					&& (urb->status == -ECONNRESET
+						|| urb->status == -ESHUTDOWN
 						|| urb->status == -ENOENT)) {
-				freeing = unlink = 1;
+				unlink = 1;
 
 			/* QH halted to unlink urbs _after_ this?  */
 			} else if (!unlink && (token & QTD_STS_ACTIVE) != 0) {
@@ -332,31 +336,7 @@
 		qtd_copy_status (urb, qtd->length, token);
 		spin_unlock (&urb->lock);
 
-		/*
-		 * NOTE:  this won't work right with interrupt urbs that
-		 * need multiple qtds ... only the first scan of qh->qtd_list
-		 * starts at the right qtd, yet multiple scans could happen
-		 * for transfers that are scheduled across multiple uframes. 
-		 * (Such schedules are not currently allowed!)
-		 */
-		if (likely (freeing != 0))
-			list_del (&qtd->qtd_list);
-		else {
-			/* restore everything the HC could change
-			 * from an interrupt QTD
-			 */
-			qtd->hw_token = (qtd->hw_token
-					& __constant_cpu_to_le32 (0x8300))
-				| cpu_to_le32 (qtd->length << 16)
-				| __constant_cpu_to_le32 (QTD_STS_ACTIVE
-					| (EHCI_TUNE_CERR << 10));
-			qtd->hw_buf [0] &= ~__constant_cpu_to_le32 (0x0fff);
-
-			/* this offset, and the length above,
-			 * are likely wrong on QTDs #2..N
-			 */
-			qtd->hw_buf [0] |= cpu_to_le32 (0x0fff & qtd->buf_dma);
-		}
+		list_del (&qtd->qtd_list);
 
 #if 0
 		if (urb->status == -EINPROGRESS)
@@ -384,14 +364,9 @@
 
 	/* last urb's completion might still need calling */
 	if (likely (last != 0)) {
-		if (likely (freeing != 0)) {
-			ehci_urb_done (ehci, last->buf_dma, last->urb);
-			ehci_qtd_free (ehci, last);
-		} else
-			ehci_urb_complete (ehci, last->buf_dma, last->urb);
-		retval++;
+		ehci_urb_done (ehci, last->buf_dma, last->urb);
+		ehci_qtd_free (ehci, last);
 	}
-	return retval;
 }
 
 /*-------------------------------------------------------------------------*/
@@ -659,7 +634,7 @@
 	if (type == PIPE_INTERRUPT) {
 		qh->usecs = usb_calc_bus_time (USB_SPEED_HIGH, is_input, 0,
 				hb_mult (maxp) * max_packet (maxp));
-		qh->start = ~0;
+		qh->start = NO_FRAME;
 
 		if (urb->dev->speed == USB_SPEED_HIGH) {
 			qh->c_usecs = 0;
@@ -742,8 +717,12 @@
 	qh->hw_info2 = cpu_to_le32 (info2);
 
 	/* initialize sw and hw queues with these qtds */
-	list_splice (qtd_list, &qh->qtd_list);
-	qh_update (qh, list_entry (qtd_list->next, struct ehci_qtd, qtd_list));
+	if (!list_empty (qtd_list)) {
+		list_splice (qtd_list, &qh->qtd_list);
+		qh_update (qh, list_entry (qtd_list->next, struct ehci_qtd, qtd_list));
+	} else {
+		qh->hw_qtd_next = qh->hw_alt_next = EHCI_LIST_END;
+	}
 
 	/* initialize data toggle state */
 	clear_toggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, qh);
@@ -813,25 +792,29 @@
 	qh = (struct ehci_qh *) *ptr;
 	if (likely (qh != 0)) {
 		struct ehci_qtd	*qtd;
-		u32		hw_next;
 
-		qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
-		hw_next = QTD_NEXT (qtd->qtd_dma);
+		if (unlikely (list_empty (qtd_list)))
+			qtd = 0;
+		else
+			qtd = list_entry (qtd_list->next, struct ehci_qtd,
+					qtd_list);
 
 		/* maybe patch the qh used for set_address */
 		if (unlikely (epnum == 0
 				&& le32_to_cpu (qh->hw_info1 & 0x7f) == 0))
 			qh->hw_info1 |= cpu_to_le32 (usb_pipedevice(urb->pipe));
 
-		/* is an URB is queued to this qh already? */
-		if (unlikely (!list_empty (&qh->qtd_list))) {
+		/* append to tds already queued to this qh? */
+		if (unlikely (!list_empty (&qh->qtd_list) && qtd)) {
 			struct ehci_qtd		*last_qtd;
 			int			short_rx = 0;
+			u32			hw_next;
 
 			/* update the last qtd's "next" pointer */
 			// dbg_qh ("non-empty qh", ehci, qh);
 			last_qtd = list_entry (qh->qtd_list.prev,
 					struct ehci_qtd, qtd_list);
+			hw_next = QTD_NEXT (qtd->qtd_dma);
 			last_qtd->hw_next = hw_next;
 
 			/* previous urb allows short rx? maybe optimize. */
@@ -872,7 +855,8 @@
 				clear_toggle (urb->dev,
 					epnum & 0x0f, !(epnum & 0x10), qh);
 			}
-			qh_update (qh, qtd);
+			if (qtd)
+				qh_update (qh, qtd);
 		}
 		list_splice (qtd_list, qh->qtd_list.prev);
 
@@ -946,7 +930,7 @@
 	ehci->reclaim = 0;
 	ehci->reclaim_ready = 0;
 
-	qh_completions (ehci, qh, 1);
+	qh_completions (ehci, qh);
 
 	// unlink any urb should now unlink all following urbs, so that
 	// relinking only happens for urbs before the unlinked ones.
@@ -1046,7 +1030,7 @@
 				spin_unlock_irqrestore (&ehci->lock, flags);
 
 				/* concurrent unlink could happen here */
-				qh_completions (ehci, qh, 1);
+				qh_completions (ehci, qh);
 
 				spin_lock_irqsave (&ehci->lock, flags);
 				qh_put (ehci, qh);
diff -Nru a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
--- a/drivers/usb/host/ehci-sched.c	Mon Aug 12 10:57:46 2002
+++ b/drivers/usb/host/ehci-sched.c	Mon Aug 12 10:57:46 2002
@@ -220,23 +220,23 @@
 
 /*-------------------------------------------------------------------------*/
 
+// FIXME microframe periods not yet handled
+
 static void intr_deschedule (
 	struct ehci_hcd	*ehci,
-	unsigned	frame,
 	struct ehci_qh	*qh,
-	unsigned	period
+	int		wait
 ) {
 	unsigned long	flags;
 	int		status;
-
-	period >>= 3;		// FIXME microframe periods not handled yet
+	unsigned	frame = qh->start;
 
 	spin_lock_irqsave (&ehci->lock, flags);
 
 	do {
 		periodic_unlink (ehci, frame, qh);
 		qh_put (ehci, qh);
-		frame += period;
+		frame += qh->period;
 	} while (frame < ehci->periodic_size);
 
 	qh->qh_state = QH_STATE_UNLINK;
@@ -258,14 +258,28 @@
 	 * (yeech!) to be sure it's done.
 	 * No other threads may be mucking with this qh.
 	 */
-	if (!status && ((ehci_get_frame (&ehci->hcd) - frame) % period) == 0)
-		udelay (125);
+	if (((ehci_get_frame (&ehci->hcd) - frame) % qh->period) == 0) {
+		if (wait) {
+			udelay (125);
+			qh->hw_next = EHCI_LIST_END;
+		} else {
+			/* we may not be IDLE yet, but if the qh is empty
+			 * the race is very short.  then if qh also isn't
+			 * rescheduled soon, it won't matter.  otherwise...
+			 */
+			vdbg ("intr_deschedule...");
+		}
+	} else
+		qh->hw_next = EHCI_LIST_END;
 
 	qh->qh_state = QH_STATE_IDLE;
-	qh->hw_next = EHCI_LIST_END;
+
+	/* update per-qh bandwidth utilization (for usbfs) */
+	ehci->hcd.self.bandwidth_allocated -= 
+		(qh->usecs + qh->c_usecs) / qh->period;
 
 	vdbg ("descheduled qh %p, per = %d frame = %d count = %d, urbs = %d",
-		qh, period, frame,
+		qh, qh->period, frame,
 		atomic_read (&qh->refcount), ehci->periodic_sched);
 }
 
@@ -309,6 +323,129 @@
 	return 1;
 }
 
+static int check_intr_schedule (
+	struct ehci_hcd		*ehci, 
+	unsigned		frame,
+	unsigned		uframe,
+	const struct ehci_qh	*qh,
+	u32			*c_maskp
+)
+{
+    	int		retval = -ENOSPC;
+
+	if (!check_period (ehci, frame, uframe, qh->period, qh->usecs))
+		goto done;
+	if (!qh->c_usecs) {
+		retval = 0;
+		*c_maskp = cpu_to_le32 (0);
+		goto done;
+	}
+
+	/* This is a split transaction; check the bandwidth available for
+	 * the completion too.  Check both worst and best case gaps: worst
+	 * case is SPLIT near uframe end, and CSPLIT near start ... best is
+	 * vice versa.  Difference can be almost two uframe times, but we
+	 * reserve unnecessary bandwidth (waste it) this way.  (Actually
+	 * even better cases exist, like immediate device NAK.)
+	 *
+	 * FIXME don't even bother unless we know this TT is idle in that
+	 * range of uframes ... for now, check_period() allows only one
+	 * interrupt transfer per frame, so needn't check "TT busy" status
+	 * when scheduling a split (QH, SITD, or FSTN).
+	 *
+	 * FIXME ehci 0.96 and above can use FSTNs
+	 */
+	if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
+				qh->period, qh->c_usecs))
+		goto done;
+	if (!check_period (ehci, frame, uframe + qh->gap_uf,
+				qh->period, qh->c_usecs))
+		goto done;
+
+	*c_maskp = cpu_to_le32 (0x03 << (8 + uframe + qh->gap_uf));
+	retval = 0;
+done:
+	return retval;
+}
+
+static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+	int 		status;
+	unsigned	uframe;
+	u32		c_mask;
+	unsigned	frame;		/* 0..(qh->period - 1), or NO_FRAME */
+
+	qh->hw_next = EHCI_LIST_END;
+	frame = qh->start;
+
+	/* reuse the previous schedule slots, if we can */
+	if (frame < qh->period) {
+		uframe = ffs (le32_to_cpup (&qh->hw_info2) & 0x00ff);
+		status = check_intr_schedule (ehci, frame, --uframe,
+				qh, &c_mask);
+	} else {
+		uframe = 0;
+		c_mask = 0;
+		status = -ENOSPC;
+	}
+
+	/* else scan the schedule to find a group of slots such that all
+	 * uframes have enough periodic bandwidth available.
+	 */
+	if (status) {
+		frame = qh->period - 1;
+		do {
+			for (uframe = 0; uframe < 8; uframe++) {
+				status = check_intr_schedule (ehci,
+						frame, uframe, qh,
+						&c_mask);
+				if (status == 0)
+					break;
+			}
+		} while (status && --frame);
+		if (status)
+			goto done;
+		qh->start = frame;
+
+		/* reset S-frame and (maybe) C-frame masks */
+		qh->hw_info2 &= ~0xffff;
+		qh->hw_info2 |= cpu_to_le32 (1 << uframe) | c_mask;
+	} else
+		dbg ("reused previous qh %p schedule", qh);
+
+	/* stuff into the periodic schedule */
+	qh->qh_state = QH_STATE_LINKED;
+	dbg ("qh %p usecs %d/%d period %d.0 starting %d.%d (gap %d)",
+		qh, qh->usecs, qh->c_usecs,
+		qh->period, frame, uframe, qh->gap_uf);
+	do {
+		if (unlikely (ehci->pshadow [frame].ptr != 0)) {
+
+// FIXME -- just link toward the end, before any qh with a shorter period,
+// AND accommodate it already having been linked here (after some other qh)
+// AS WELL AS updating the schedule checking logic
+
+			BUG ();
+		} else {
+			ehci->pshadow [frame].qh = qh_get (qh);
+			ehci->periodic [frame] =
+				QH_NEXT (qh->qh_dma);
+		}
+		wmb ();
+		frame += qh->period;
+	} while (frame < ehci->periodic_size);
+
+	/* update per-qh bandwidth for usbfs */
+	ehci->hcd.self.bandwidth_allocated += 
+		(qh->usecs + qh->c_usecs) / qh->period;
+
+	/* maybe enable periodic schedule processing */
+	if (!ehci->periodic_sched++)
+		status = enable_periodic (ehci);
+done:
+	return status;
+}
+
 static int intr_submit (
 	struct ehci_hcd		*ehci,
 	struct urb		*urb,
@@ -321,6 +458,7 @@
 	struct hcd_dev		*dev;
 	int			is_input;
 	int			status = 0;
+	struct list_head	empty;
 
 	/* get endpoint and transfer/schedule data */
 	epnum = usb_pipeendpoint (urb->pipe);
@@ -328,161 +466,30 @@
 	if (is_input)
 		epnum |= 0x10;
 
-	/*
-	 * NOTE: current completion/restart logic doesn't handle more than
-	 * one qtd in a periodic qh ... 16-20 KB/urb is pretty big for this.
-	 * such big requests need many periods to transfer.
-	 *
-	 * FIXME want to change hcd core submit model to expect queuing
-	 * for all transfer types ... not just ISO and (with flag) BULK.
-	 * that means: getting rid of this check; handling the "interrupt
-	 * urb already queued" case below like bulk queuing is handled (no
-	 * errors possible!); and completly getting rid of that annoying
-	 * qh restart logic.  simpler/smaller overall, and more flexible.
-	 */
-	if (unlikely (qtd_list->next != qtd_list->prev)) {
-		dbg ("only one intr qtd per urb allowed"); 
-		status = -EINVAL;
-		goto done;
-	}
-
 	spin_lock_irqsave (&ehci->lock, flags);
-
-	/* get the qh (must be empty and idle) */
 	dev = (struct hcd_dev *)urb->dev->hcpriv;
-	qh = (struct ehci_qh *) dev->ep [epnum];
-	if (qh) {
-		/* only allow one queued interrupt urb per EP */
-		if (unlikely (qh->qh_state != QH_STATE_IDLE
-				|| !list_empty (&qh->qtd_list))) {
-			dbg ("interrupt urb already queued");
-			status = -EBUSY;
-		} else {
-			/* maybe reset hardware's data toggle in the qh */
-			if (unlikely (!usb_gettoggle (urb->dev, epnum & 0x0f,
-					!(epnum & 0x10)))) {
-				qh->hw_token |=
-					__constant_cpu_to_le32 (QTD_TOGGLE);
-				usb_settoggle (urb->dev, epnum & 0x0f,
-					!(epnum & 0x10), 1);
-			}
-			/* trust the QH was set up as interrupt ... */
-			list_splice (qtd_list, &qh->qtd_list);
-			qh_update (qh, list_entry (qtd_list->next,
-						struct ehci_qtd, qtd_list));
-			qtd_list = &qh->qtd_list;
-		}
-	} else {
-		/* can't sleep here, we have ehci->lock... */
-		qh = ehci_qh_make (ehci, urb, qtd_list, SLAB_ATOMIC);
-		if (likely (qh != 0)) {
-			// dbg ("new INTR qh %p", qh);
-			dev->ep [epnum] = qh;
-			qtd_list = &qh->qtd_list;
-		} else
-			status = -ENOMEM;
-	}
 
-	/* Schedule this periodic QH. */
-	if (likely (status == 0)) {
-		unsigned	frame = qh->period;
-
-		qh->hw_next = EHCI_LIST_END;
-
-		urb->hcpriv = qh_get (qh);
-		status = -ENOSPC;
+	/* get qh and force any scheduling errors */
+	INIT_LIST_HEAD (&empty);
+	qh = qh_append_tds (ehci, urb, &empty, epnum, &dev->ep [epnum]);
+	if (qh == 0) {
+		status = -ENOMEM;
+		goto done;
+	}
+	if (qh->qh_state == QH_STATE_IDLE) {
+		if ((status = qh_schedule (ehci, qh)) != 0)
+			goto done;
+	}
 
-		/* pick a set of schedule slots, link the QH into them */
-		do {
-			unsigned	uframe;
-			u32		c_mask = 0;
+	/* then queue the urb's tds to the qh */
+	qh = qh_append_tds (ehci, urb, qtd_list, epnum, &dev->ep [epnum]);
+	BUG_ON (qh == 0);
 
-			/* pick a set of slots such that all uframes have
-			 * enough periodic bandwidth available.
-			 */
-			frame--;
-			for (uframe = 0; uframe < 8; uframe++) {
-				if (check_period (ehci, frame, uframe,
-						qh->period, qh->usecs) == 0)
-					continue;
-
-				/* If this is a split transaction, check the
-				 * bandwidth available for the completion
-				 * too.  check both best and worst case gaps:
-				 * worst case is SPLIT near uframe end, and
-				 * CSPLIT near start ... best is vice versa.
-				 * Difference can be almost two uframe times.
-				 *
-				 * FIXME don't even bother unless we know
-				 * this TT is idle in that uframe ... right
-				 * now we know only one interrupt transfer
-				 * will be scheduled per frame, so we don't
-				 * need to update/check TT state when we
-				 * schedule a split (QH, SITD, or FSTN).
-				 *
-				 * FIXME ehci 0.96 and above can use FSTNs
-				 */
-				if (!qh->c_usecs)
-				    	break;
-				if (check_period (ehci, frame,
-						uframe + qh->gap_uf,
-						qh->period, qh->c_usecs) == 0)
-					continue;
-				if (check_period (ehci, frame,
-						uframe + qh->gap_uf + 1,
-						qh->period, qh->c_usecs) == 0)
-					continue;
-
-				c_mask = 0x03 << (8 + uframe + qh->gap_uf);
-				c_mask = cpu_to_le32 (c_mask);
-				break;
-			}
-			if (uframe == 8)
-				continue;
+	/* ... update usbfs periodic stats */
+	ehci->hcd.self.bandwidth_int_reqs++;
 
-			/* QH will run once each period, starting there  */
-			urb->start_frame = qh->start = frame;
-			status = 0;
-
-			/* reset S-frame and (maybe) C-frame masks */
-			qh->hw_info2 &= ~0xffff;
-			qh->hw_info2 |= cpu_to_le32 (1 << uframe) | c_mask;
-			// dbg_qh ("Schedule INTR qh", ehci, qh);
-
-			/* stuff into the periodic schedule */
-			qh->qh_state = QH_STATE_LINKED;
-			dbg ("qh %p usecs %d/%d period %d.0 "
-					"starting %d.%d (gap %d)",
-				qh, qh->usecs, qh->c_usecs, qh->period,
-				frame, uframe, qh->gap_uf);
-			do {
-				if (unlikely (ehci->pshadow [frame].ptr != 0)) {
-// FIXME -- just link toward the end, before any qh with a shorter period,
-// AND handle it already being (implicitly) linked into this frame
-// AS WELL AS updating the check_period() logic
-					BUG ();
-				} else {
-					ehci->pshadow [frame].qh = qh_get (qh);
-					ehci->periodic [frame] =
-						QH_NEXT (qh->qh_dma);
-				}
-				wmb ();
-				frame += qh->period;
-			} while (frame < ehci->periodic_size);
-
-			/* update bandwidth utilization records (for usbfs) */
-			usb_claim_bandwidth (urb->dev, urb,
-				(qh->usecs + qh->c_usecs) / qh->period, 0);
-
-			/* maybe enable periodic schedule processing */
-			if (!ehci->periodic_sched++)
-				status = enable_periodic (ehci);
-			break;
-
-		} while (frame);
-	}
-	spin_unlock_irqrestore (&ehci->lock, flags);
 done:
+	spin_unlock_irqrestore (&ehci->lock, flags);
 	if (status)
 		qtd_list_free (ehci, urb, qtd_list);
 
@@ -496,10 +503,6 @@
 	struct ehci_qh	*qh,
 	unsigned long	flags		/* caller owns ehci->lock ... */
 ) {
-	struct ehci_qtd	*qtd;
-	struct urb	*urb;
-	int		unlinking;
-
 	/* nothing to report? */
 	if (likely ((qh->hw_token & __constant_cpu_to_le32 (QTD_STS_ACTIVE))
 			!= 0))
@@ -509,43 +512,14 @@
 		return flags;
 	}
 	
-	qtd = list_entry (qh->qtd_list.next, struct ehci_qtd, qtd_list);
-	urb = qtd->urb;
-	unlinking = (urb->status == -ENOENT) || (urb->status == -ECONNRESET);
-
-	/* call any completions, after patching for reactivation */
+	/* handle any completions */
 	spin_unlock_irqrestore (&ehci->lock, flags);
-	/* NOTE:  currently restricted to one qtd per qh! */
-	if (qh_completions (ehci, qh, 0) == 0)
-		urb = 0;
+	qh_completions (ehci, qh);
 	spin_lock_irqsave (&ehci->lock, flags);
 
-	/* never reactivate requests that were unlinked ... */
-	if (likely (urb != 0)) {
-		if (unlinking
-				|| urb->status == -ECONNRESET
-				|| urb->status == -ENOENT
-				// || (urb->dev == null)
-				|| ehci->hcd.state == USB_STATE_HALT)
-			urb = 0;
-		// FIXME look at all those unlink cases ... we always
-		// need exactly one completion that reports unlink.
-		// the one above might not have been it!
-	}
-
-	/* normally reactivate */
-	if (likely (urb != 0)) {
-		if (usb_pipeout (urb->pipe))
-			pci_dma_sync_single (ehci->hcd.pdev,
-				qtd->buf_dma,
-				urb->transfer_buffer_length,
-				PCI_DMA_TODEVICE);
-		urb->status = -EINPROGRESS;
-		urb->actual_length = 0;
+	if (unlikely (list_empty (&qh->qtd_list)))
+		intr_deschedule (ehci, qh, 0);
 
-		/* patch qh and restart */
-		qh_update (qh, qtd);
-	}
 	return flags;
 }
 
diff -Nru a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
--- a/drivers/usb/host/ehci.h	Mon Aug 12 10:57:46 2002
+++ b/drivers/usb/host/ehci.h	Mon Aug 12 10:57:46 2002
@@ -299,6 +299,7 @@
 	u8			c_usecs;	/* ... split completion bw */
 	unsigned short		period;		/* polling interval */
 	unsigned short		start;		/* where polling starts */
+#define NO_FRAME ((unsigned short)~0)			/* pick new start */
 
 } __attribute__ ((aligned (32)));
 
