From: Milan Broz <mbroz@redhat.com>

Process write requests in a separate function and queue the
final bio through the io workqueue.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Milan Broz <mbroz@redhat.com>
---
 drivers/md/dm-crypt.c |   70 +++++++++++++++++++++++++++++---------------------
 1 files changed, 41 insertions(+), 29 deletions(-)

Index: linux-2.6.24-rc1/drivers/md/dm-crypt.c
===================================================================
--- linux-2.6.24-rc1.orig/drivers/md/dm-crypt.c	2007-11-06 01:04:17.000000000 +0000
+++ linux-2.6.24-rc1/drivers/md/dm-crypt.c	2007-11-06 01:05:25.000000000 +0000
@@ -582,23 +582,38 @@ static void crypt_write_io(struct dm_cry
 
 static void crypt_write_io_done(struct dm_crypt_io *io, int error)
 {
+	struct bio *clone = io->ctx.bio_out;
+	struct crypt_config *cc = io->target->private;
+
+	if (unlikely(error < 0)) {
+		crypt_free_buffer_pages(cc, clone);
+		bio_put(clone);
+		io->error = -EIO;
+		crypt_dec_pending(io);
+		return;
+	}
+
+	/* crypt_convert should have filled the clone bio */
+	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
+
+	clone->bi_sector = cc->start + io->sector;
+	io->sector += bio_sectors(clone);
+
+	generic_make_request(clone);
 }
 
-static void crypt_write_io_process(struct dm_crypt_io *io)
+static void crypt_write_loop(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
-	struct bio *base_bio = io->base_bio;
 	struct bio *clone;
-	unsigned remaining = base_bio->bi_size;
-
-	atomic_inc(&io->pending);
-
-	crypt_convert_init(cc, &io->ctx, NULL, base_bio, io->sector);
+	unsigned remaining = io->base_bio->bi_size;
+	int r = 0;
 
 	/*
 	 * The allocated buffers can be smaller than the whole bio,
 	 * so repeat the whole process until all the data can be handled.
 	 */
+
 	while (remaining) {
 		clone = crypt_alloc_buffer(io, remaining);
 		if (unlikely(!clone)) {
@@ -610,37 +625,34 @@ static void crypt_write_process(struct d
 		io->ctx.bio_out = clone;
 		io->ctx.idx_out = 0;
 
-		if (unlikely(crypt_convert(cc, &io->ctx) < 0)) {
-			crypt_free_buffer_pages(cc, clone);
-			bio_put(clone);
-			io->error = -EIO;
-			crypt_dec_pending(io);
-			return;
-		}
-
-		/* crypt_convert should have filled the clone bio */
-		BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
-
-		clone->bi_sector = cc->start + io->sector;
 		remaining -= clone->bi_size;
-		io->sector += bio_sectors(clone);
-
-		/* Grab another reference to the io struct
-		 * before we kick off the request */
-		if (remaining)
-			atomic_inc(&io->pending);
 
-		generic_make_request(clone);
+		r = crypt_convert(cc, &io->ctx);
 
-		/* Do not reference clone after this - it
-		 * may be gone already. */
+		crypt_write_io_done(io, r);
+		if (unlikely(r < 0))
+			return;
 
 		/* out of memory -> run queues */
-		if (remaining)
+		if (unlikely(remaining)) {
+			atomic_inc(&io->pending);
 			congestion_wait(WRITE, HZ/100);
+		}
 	}
 }
 
+static void crypt_write_io_process(struct dm_crypt_io *io)
+{
+	struct crypt_config *cc = io->target->private;
+
+	atomic_add(2, &io->pending);
+
+	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
+	crypt_write_loop(io);
+
+	crypt_dec_pending(io);
+}
+
 static void crypt_read_io_done(struct dm_crypt_io *io, int error)
 {
 	if (unlikely(error < 0))
