From: Nick Piggin <nickpiggin@yahoo.com.au>

As with the earlier change to load_balance, only lock the remote runqueue in
load_balance_newidle if the busiest queue found has nr_running > 1.  This will
reduce the frequency of expensive remote runqueue lock acquisitions in the
schedule() path on some workloads.
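
In effect, the newly-idle path now checks the remote queue's nr_running
before paying for its lock.  Roughly (a simplified sketch of the pattern;
the exact code is in the diff below):

	nr_moved = 0;
	if (busiest->nr_running > 1) {
		/* Only now take the expensive remote lock */
		double_lock_balance(this_rq, busiest);
		nr_moved = move_tasks(this_rq, this_cpu, busiest,
					imbalance, sd, NEWLY_IDLE, NULL);
		spin_unlock(&busiest->lock);
	}

The unlocked nr_running read can race with a concurrent update, but the
worst case should be a skipped or fruitless balance attempt, which later
balancing passes will make up for.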

Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 kernel/sched.c |   17 ++++++++++-------
 1 files changed, 10 insertions(+), 7 deletions(-)

diff -puN kernel/sched.c~sched-less-newidle-locking kernel/sched.c
--- devel/kernel/sched.c~sched-less-newidle-locking	2005-09-07 20:11:03.000000000 -0700
+++ devel-akpm/kernel/sched.c	2005-09-07 20:11:03.000000000 -0700
@@ -2181,8 +2181,7 @@ static int load_balance(int this_cpu, ru
 		 */
 		double_lock_balance(this_rq, busiest);
 		nr_moved = move_tasks(this_rq, this_cpu, busiest,
-						imbalance, sd, idle,
-						&all_pinned);
+					imbalance, sd, idle, &all_pinned);
 		spin_unlock(&busiest->lock);
 
 		/* All tasks on this runqueue were pinned by CPU affinity */
@@ -2277,18 +2276,22 @@ static int load_balance_newidle(int this
 
 	BUG_ON(busiest == this_rq);
 
-	/* Attempt to move tasks */
-	double_lock_balance(this_rq, busiest);
-
 	schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance);
-	nr_moved = move_tasks(this_rq, this_cpu, busiest,
+
+	nr_moved = 0;
+	if (busiest->nr_running > 1) {
+		/* Attempt to move tasks */
+		double_lock_balance(this_rq, busiest);
+		nr_moved = move_tasks(this_rq, this_cpu, busiest,
 					imbalance, sd, NEWLY_IDLE, NULL);
+		spin_unlock(&busiest->lock);
+	}
+
 	if (!nr_moved)
 		schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
 	else
 		sd->nr_balance_failed = 0;
 
-	spin_unlock(&busiest->lock);
 	return nr_moved;
 
 out_balanced:
_