From: Milan Broz <mbroz@redhat.com>

dm-crypt: Use crypto ablkcipher interface

Move the encrypt/decrypt core over to the async crypto (ablkcipher) interface.
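
Each sector is now processed through its own ablkcipher request; the
dm_crypt_request bookkeeping (the in/out scatterlists) and the IV are
carried in the same allocation, behind the request itself.
crypt_convert() keeps issuing requests while the cipher completes them
synchronously, accounts a pending request when the cipher returns
-EINPROGRESS, and on -EBUSY waits for the completion callback to signal
that the backlogged request was accepted before moving on.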

[MB FIXME: Needs the following patch to work/compile]
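
For reference, the calling convention a single block goes through looks
roughly like this (a sketch only, not final code: the request
allocation and callback setup are assumed to live in crypt_alloc_req()
from the following patch, with the request registered for backlogging):

	ablkcipher_request_set_callback(req,
			CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
			kcryptd_async_done, ctx);
	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	switch (crypto_ablkcipher_encrypt(req)) {
	case 0:			/* completed synchronously */
		break;
	case -EINPROGRESS:	/* queued; the callback will fire */
		break;
	case -EBUSY:		/* queue full, request backlogged; wait
				   for the callback to signal a restart */
		wait_for_completion(&ctx->restart);
		break;
	}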

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Milan Broz <mbroz@redhat.com>
---
 drivers/md/dm-crypt.c |  126 ++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 75 insertions(+), 51 deletions(-)

Index: linux-2.6.24-rc5/drivers/md/dm-crypt.c
===================================================================
--- linux-2.6.24-rc5.orig/drivers/md/dm-crypt.c	2007-12-12 15:44:08.000000000 +0000
+++ linux-2.6.24-rc5/drivers/md/dm-crypt.c	2007-12-12 15:44:09.000000000 +0000
@@ -318,38 +318,6 @@ static struct crypt_iv_operations crypt_
 	.generator = crypt_iv_null_gen
 };
 
-static int
-crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
-                          struct scatterlist *in, unsigned int length,
-                          int write, sector_t sector)
-{
-	u8 iv[cc->iv_size] __attribute__ ((aligned(__alignof__(u64))));
-	struct blkcipher_desc desc = {
-		.tfm = cc->tfm,
-		.info = iv,
-		.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
-	};
-	int r;
-
-	if (cc->iv_gen_ops) {
-		r = cc->iv_gen_ops->generator(cc, iv, sector);
-		if (r < 0)
-			return r;
-
-		if (write)
-			r = crypto_blkcipher_encrypt_iv(&desc, out, in, length);
-		else
-			r = crypto_blkcipher_decrypt_iv(&desc, out, in, length);
-	} else {
-		if (write)
-			r = crypto_blkcipher_encrypt(&desc, out, in, length);
-		else
-			r = crypto_blkcipher_decrypt(&desc, out, in, length);
-	}
-
-	return r;
-}
-
 static void crypt_convert_init(struct crypt_config *cc,
 			       struct convert_context *ctx,
 			       struct bio *bio_out, struct bio *bio_in,
@@ -367,18 +335,30 @@ static void crypt_convert_init(struct cr
 }
 
 static int crypt_convert_block(struct crypt_config *cc,
-			       struct convert_context *ctx)
+			       struct convert_context *ctx,
+			       struct ablkcipher_request *req)
 {
 	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
 	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
-	struct dm_crypt_request dmreq;
+	struct dm_crypt_request *dmreq;
+	u8 *iv;
+	int r = 0;
 
-	sg_init_table(&dmreq.sg_in, 1);
-	sg_set_page(&dmreq.sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
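+	/*
+	 * The dm_crypt_request and the IV share one allocation with the
+	 * ablkcipher request: cc->dmreq_start is the offset of the dm
+	 * part, set up by crypt_alloc_req() in the following patch.
+	 */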
+	dmreq = (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
+	iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
+			 crypto_ablkcipher_alignmask(cc->tfm) + 1);
+
+	sg_init_table(&dmreq->sg_in, 1);
+	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
 		    bv_in->bv_offset + ctx->offset_in);
 
-	sg_init_table(&dmreq.sg_out, 1);
-	sg_set_page(&dmreq.sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
+	sg_init_table(&dmreq->sg_out, 1);
+	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
 		    bv_out->bv_offset + ctx->offset_out);
 
 	ctx->offset_in += 1 << SECTOR_SHIFT;
@@ -393,10 +373,22 @@ static int crypt_convert_block(struct cr
 		ctx->idx_out++;
 	}
 
-	return crypt_convert_scatterlist(cc, &dmreq.sg_out, &dmreq.sg_in,
-					 dmreq.sg_in.length,
-					 bio_data_dir(ctx->bio_in) == WRITE,
-					 ctx->sector);
+	if (cc->iv_gen_ops) {
+		r = cc->iv_gen_ops->generator(cc, iv, ctx->sector);
+		if (r < 0)
+			return r;
+	}
+
+	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
+				     1 << SECTOR_SHIFT, iv);
+
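+	/* bio_in holds plaintext on a write, ciphertext on a read */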
+	if (bio_data_dir(ctx->bio_in) == WRITE)
+		r = crypto_ablkcipher_encrypt(req);
+	else
+		r = crypto_ablkcipher_decrypt(req);
+
+	return r;
 }
 
 static void kcryptd_async_done(struct crypto_async_request *async_req, int error);
@@ -421,9 +413,29 @@ static int crypt_convert(struct crypt_co
 
 	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
 	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
-		r = crypt_convert_block(cc, ctx);
-		if (r < 0)
-			break;
+
+		crypt_alloc_req(cc, ctx);
+
+		r = crypt_convert_block(cc, ctx, cc->req);
+
+		switch (r) {
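+		/* the crypto queue is full and the request was backlogged;
+		 * wait until the completion callback signals a restart */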
+		case -EBUSY:
+			wait_for_completion(&ctx->restart);
+			INIT_COMPLETION(ctx->restart);
+			/* fall through */
+		case -EINPROGRESS:
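+			/* the request now belongs to the async callback;
+			 * account it and take a fresh one next round */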
+			atomic_inc(&ctx->pending);
+			cc->req = NULL;
+			r = 0;
+			/* fall through */
+		case 0:
+			ctx->sector++;
+			continue;
+		}
+
+		break;
-
-		ctx->sector++;
 	}
@@ -686,9 +698,14 @@ static void kcryptd_crypt_write_convert_
 
 		r = crypt_convert(cc, &io->ctx);
 
-		kcryptd_crypt_write_io_submit(io, r, 0);
-		if (unlikely(r < 0))
-			return;
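+		/* on -EINPROGRESS the async completion path will submit
+		 * the io; keep a pending reference until then */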
+		if (r != -EINPROGRESS) {
+			kcryptd_crypt_write_io_submit(io, r, 0);
+			if (unlikely(r < 0))
+				return;
+		} else
+			atomic_inc(&io->pending);
 
 		/* out of memory -> run queues */
 		if (unlikely(remaining))
@@ -724,11 +741,18 @@ static void kcryptd_crypt_read_convert(s
 	struct crypt_config *cc = io->target->private;
 	int r = 0;
 
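+	/* hold a pending reference so the io cannot complete while
+	 * async requests are still in flight */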
+	atomic_inc(&io->pending);
+
 	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio, io->sector);
 
 	r = crypt_convert(cc, &io->ctx);
 
-	kcryptd_crypt_read_done(io, r);
+	if (r != -EINPROGRESS)
+		kcryptd_crypt_read_done(io, r);
+
+	crypt_dec_pending(io);
 }
 
 static void kcryptd_async_done(struct crypto_async_request *async_req, int error)
