[packages/kernel] - dm-crypt-remove-io-pool patch updated for 3.17 - bio-kmalloc-align applied upstream - dm-crypt-per

lkrotowski lkrotowski at pld-linux.org
Mon Nov 24 16:02:59 CET 2014


commit c084197701057bb85ad97cbefb2ee052dcd39aab
Author: Łukasz Krotowski <lkrotowski at pld-linux.org>
Date:   Sat Nov 8 01:53:34 2014 +0100

    - dm-crypt-remove-io-pool patch updated for 3.17
    - bio-kmalloc-align applied upstream
    - dm-crypt-per_bio_data applied upstream

 bio-kmalloc-align.patch       |  37 -----------
 dm-crypt-per_bio_data.patch   | 138 ------------------------------------------
 dm-crypt-remove-io-pool.patch |  11 ++--
 kernel.spec                   |  16 ++---
 4 files changed, 12 insertions(+), 190 deletions(-)
---
diff --git a/kernel.spec b/kernel.spec
index b48179c..b4d636c 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -225,14 +225,12 @@ Patch250:	kernel-fix_256colors_menuconfig.patch
 Patch400:	kernel-virtio-gl-accel.patch
 
 # http://people.redhat.com/mpatocka/patches/kernel/dm-crypt-paralelizace/current/series.html
-Patch500:	bio-kmalloc-align.patch
-Patch501:	dm-crypt-per_bio_data.patch
-Patch502:	dm-crypt-unbound-workqueue.patch
-Patch503:	dm-crypt-dont-allocate-partial-pages.patch
-Patch504:	dm-crypt-fix-allocation-deadlock.patch
-Patch505:	dm-crypt-remove-io-pool.patch
-Patch506:	dm-crypt-offload-writes-to-thread.patch
-Patch507:	dm-crypt-sort-requests.patch
+Patch500:	dm-crypt-unbound-workqueue.patch
+Patch501:	dm-crypt-dont-allocate-partial-pages.patch
+Patch502:	dm-crypt-fix-allocation-deadlock.patch
+Patch503:	dm-crypt-remove-io-pool.patch
+Patch504:	dm-crypt-offload-writes-to-thread.patch
+Patch505:	dm-crypt-sort-requests.patch
 
 Patch2000:	kernel-small_fixes.patch
 Patch2001:	kernel-pwc-uncompress.patch
@@ -740,8 +738,6 @@ cd linux-%{basever}
 %patch503 -p1
 %patch504 -p1
 %patch505 -p1
-%patch506 -p1
-%patch507 -p1
 %endif
 
 %endif # vanilla
diff --git a/bio-kmalloc-align.patch b/bio-kmalloc-align.patch
deleted file mode 100644
index a697401..0000000
--- a/bio-kmalloc-align.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-bio: use kmalloc alignment for bio slab
-
-Various subsystems can ask the bio subsystem to create a bio slab cache
-with some free space before the bio. This free space can be used for any
-purpose. Device mapper uses this feature to place some target-specific and
-device-mapper specific data before the bio, so that the target-specific
-data doesn't have to be allocated separately.
-
-This mechanism is used in place of kmalloc, so we need the allocated
-slab to have the same memory alignment as memory allocated with kmalloc.
-
-This patch changes the function bio_find_or_create_slab so that it uses
-ARCH_KMALLOC_MINALIGN alignment when creating the slab cache. This patch
-is needed so that dm-crypt can use per-bio data for encryption - the
-crypto subsystem assumes that these data have the same alignment as
-kmallocated memory.
-
-Signed-off-by: Mikulas Patocka <mpatocka at redhat.com>
-
----
- block/bio.c |    3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
-Index: linux-3.14-rc3/block/bio.c
-===================================================================
---- linux-3.14-rc3.orig/block/bio.c	2014-02-23 23:53:50.000000000 +0100
-+++ linux-3.14-rc3/block/bio.c	2014-02-23 23:55:00.000000000 +0100
-@@ -112,7 +112,8 @@ static struct kmem_cache *bio_find_or_cr
- 	bslab = &bio_slabs[entry];
- 
- 	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
--	slab = kmem_cache_create(bslab->name, sz, 0, SLAB_HWCACHE_ALIGN, NULL);
-+	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
-+				 SLAB_HWCACHE_ALIGN, NULL);
- 	if (!slab)
- 		goto out_unlock;
- 
diff --git a/dm-crypt-per_bio_data.patch b/dm-crypt-per_bio_data.patch
deleted file mode 100644
index bfb0288..0000000
--- a/dm-crypt-per_bio_data.patch
+++ /dev/null
@@ -1,138 +0,0 @@
-dm-crypt: use per-bio data
-
-This patch changes dm-crypt so that it uses auxiliary data allocated with
-the bio.
-
-Dm-crypt requires two allocations per request - struct dm_crypt_io and
-struct ablkcipher_request (with other data appended to it). It used
-mempool for the allocation.
-
-Some requests may require more dm_crypt_ios and ablkcipher_requests,
-however most requests need just one of each of these two structures to
-complete.
-
-This patch changes it so that the first dm_crypt_io and ablkcipher_request
-are allocated with the bio (using target per_bio_data_size option). If the
-request needs additional values, they are allocated from the mempool.
-
-Signed-off-by: Mikulas Patocka <mpatocka at redhat.com>
-
----
- drivers/md/dm-crypt.c |   40 ++++++++++++++++++++++++++--------------
- 1 file changed, 26 insertions(+), 14 deletions(-)
-
-Index: linux-3.14-rc4/drivers/md/dm-crypt.c
-===================================================================
---- linux-3.14-rc4.orig/drivers/md/dm-crypt.c	2014-02-27 17:48:31.000000000 +0100
-+++ linux-3.14-rc4/drivers/md/dm-crypt.c	2014-02-27 17:48:31.000000000 +0100
-@@ -59,7 +59,7 @@ struct dm_crypt_io {
- 	int error;
- 	sector_t sector;
- 	struct dm_crypt_io *base_io;
--};
-+} CRYPTO_MINALIGN_ATTR;
- 
- struct dm_crypt_request {
- 	struct convert_context *ctx;
-@@ -162,6 +162,8 @@ struct crypt_config {
- 	 */
- 	unsigned int dmreq_start;
- 
-+	unsigned int per_bio_data_size;
-+
- 	unsigned long flags;
- 	unsigned int key_size;
- 	unsigned int key_parts;      /* independent parts in key buffer */
-@@ -895,6 +897,14 @@ static void crypt_alloc_req(struct crypt
- 	    kcryptd_async_done, dmreq_of_req(cc, ctx->req));
- }
- 
-+static void crypt_free_req(struct crypt_config *cc,
-+			   struct ablkcipher_request *req, struct bio *base_bio)
-+{
-+	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
-+	if ((struct ablkcipher_request *)(io + 1) != req)
-+		mempool_free(req, cc->req_pool);
-+}
-+
- /*
-  * Encrypt / decrypt data from one bio to another one (can be the same one)
-  */
-@@ -1008,12 +1018,9 @@ static void crypt_free_buffer_pages(stru
- 	}
- }
- 
--static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
--					  struct bio *bio, sector_t sector)
-+static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
-+			  struct bio *bio, sector_t sector)
- {
--	struct dm_crypt_io *io;
--
--	io = mempool_alloc(cc->io_pool, GFP_NOIO);
- 	io->cc = cc;
- 	io->base_bio = bio;
- 	io->sector = sector;
-@@ -1021,8 +1028,6 @@ static struct dm_crypt_io *crypt_io_allo
- 	io->base_io = NULL;
- 	io->ctx.req = NULL;
- 	atomic_set(&io->io_pending, 0);
--
--	return io;
- }
- 
- static void crypt_inc_pending(struct dm_crypt_io *io)
-@@ -1046,8 +1051,9 @@ static void crypt_dec_pending(struct dm_
- 		return;
- 
- 	if (io->ctx.req)
--		mempool_free(io->ctx.req, cc->req_pool);
--	mempool_free(io, cc->io_pool);
-+		crypt_free_req(cc, io->ctx.req, base_bio);
-+	if (io != dm_per_bio_data(base_bio, cc->per_bio_data_size))
-+		mempool_free(io, cc->io_pool);
- 
- 	if (likely(!base_io))
- 		bio_endio(base_bio, error);
-@@ -1255,8 +1261,8 @@ static void kcryptd_crypt_write_convert(
- 		 * between fragments, so switch to a new dm_crypt_io structure.
- 		 */
- 		if (unlikely(!crypt_finished && remaining)) {
--			new_io = crypt_io_alloc(io->cc, io->base_bio,
--						sector);
-+			new_io = mempool_alloc(cc->io_pool, GFP_NOIO);
-+			crypt_io_init(new_io, io->cc, io->base_bio, sector);
- 			crypt_inc_pending(new_io);
- 			crypt_convert_init(cc, &new_io->ctx, NULL,
- 					   io->base_bio, sector);
-@@ -1325,7 +1331,7 @@ static void kcryptd_async_done(struct cr
- 	if (error < 0)
- 		io->error = -EIO;
- 
--	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
-+	crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
- 
- 	if (!atomic_dec_and_test(&ctx->cc_pending))
- 		return;
-@@ -1728,6 +1734,10 @@ static int crypt_ctr(struct dm_target *t
- 		goto bad;
- 	}
- 
-+	cc->per_bio_data_size = ti->per_bio_data_size =
-+				sizeof(struct dm_crypt_io) + cc->dmreq_start +
-+				sizeof(struct dm_crypt_request) + cc->iv_size;
-+
- 	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
- 	if (!cc->page_pool) {
- 		ti->error = "Cannot allocate page mempool";
-@@ -1824,7 +1834,9 @@ static int crypt_map(struct dm_target *t
- 		return DM_MAPIO_REMAPPED;
- 	}
- 
--	io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
-+	io = dm_per_bio_data(bio, cc->per_bio_data_size);
-+	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
-+	io->ctx.req = (struct ablkcipher_request *)(io + 1);
- 
- 	if (bio_data_dir(io->base_bio) == READ) {
- 		if (kcryptd_io_read(io, GFP_NOWAIT))
diff --git a/dm-crypt-remove-io-pool.patch b/dm-crypt-remove-io-pool.patch
index 5217724..1ad444b 100644
--- a/dm-crypt-remove-io-pool.patch
+++ b/dm-crypt-remove-io-pool.patch
@@ -47,7 +47,7 @@ Index: linux-3.14/drivers/md/dm-crypt.c
  
  	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
  		cc->iv_gen_ops->dtr(cc);
-@@ -1664,19 +1657,13 @@ static int crypt_ctr(struct dm_target *t
+@@ -1658,13 +1658,6 @@
  	if (ret < 0)
  		goto bad;
  
@@ -60,13 +60,14 @@ Index: linux-3.14/drivers/md/dm-crypt.c
 -
  	cc->dmreq_start = sizeof(struct ablkcipher_request);
  	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
- 	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
- 	cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
- 			   ~(crypto_tfm_ctx_alignment() - 1);
+ 	cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
+@@ -1682,6 +1675,7 @@
+ 		iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
+ 	}
  
 +	ret = -ENOMEM;
  	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
- 			sizeof(struct dm_crypt_request) + cc->iv_size);
+ 			sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
  	if (!cc->req_pool) {
 @@ -1938,14 +1925,9 @@ static int __init dm_crypt_init(void)
  {
================================================================

---- gitweb:

http://git.pld-linux.org/gitweb.cgi/packages/kernel.git/commitdiff/c084197701057bb85ad97cbefb2ee052dcd39aab



More information about the pld-cvs-commit mailing list