[packages/kernel] - removed upstreamed dm-crypt patches

lkrotowski lkrotowski at pld-linux.org
Mon Jun 8 20:06:04 CEST 2015


commit 67ed654c0c1b8b72767d538b14969b2174fe70c9
Author: Łukasz Krotowski <lkrotowski at pld-linux.org>
Date:   Mon Jun 8 20:04:12 2015 +0200

    - removed upstreamed dm-crypt patches

 dm-crypt-dont-allocate-partial-pages.patch | 259 -----------------------------
 dm-crypt-fix-allocation-deadlock.patch     | 114 -------------
 dm-crypt-offload-writes-to-thread.patch    | 232 --------------------------
 dm-crypt-remove-io-pool.patch              |  94 -----------
 dm-crypt-sort-requests.patch               | 137 ---------------
 dm-crypt-unbound-workqueue.patch           |  24 ---
 6 files changed, 860 deletions(-)
---
diff --git a/dm-crypt-dont-allocate-partial-pages.patch b/dm-crypt-dont-allocate-partial-pages.patch
deleted file mode 100644
index b387f5d..0000000
--- a/dm-crypt-dont-allocate-partial-pages.patch
+++ /dev/null
@@ -1,259 +0,0 @@
-dm-crypt: don't allocate pages for a partial request.
-
-This patch changes crypt_alloc_buffer so that it always allocates pages for
-a full request.
-
-This change enables further simplification and the removal of one refcount
-in the next patches.
-
-Note: the next patch is needed to fix a theoretical deadlock
-
-Signed-off-by: Mikulas Patocka <mpatocka at redhat.com>
-
----
- drivers/md/dm-crypt.c |  139 ++++++++++----------------------------------------
- 1 file changed, 30 insertions(+), 109 deletions(-)
-
-Index: linux-3.14/drivers/md/dm-crypt.c
-===================================================================
---- linux-3.14.orig/drivers/md/dm-crypt.c	2014-04-04 20:48:50.000000000 +0200
-+++ linux-3.14/drivers/md/dm-crypt.c	2014-04-04 20:57:36.000000000 +0200
-@@ -58,7 +58,6 @@ struct dm_crypt_io {
- 	atomic_t io_pending;
- 	int error;
- 	sector_t sector;
--	struct dm_crypt_io *base_io;
- } CRYPTO_MINALIGN_ATTR;
- 
- struct dm_crypt_request {
-@@ -172,7 +171,6 @@ struct crypt_config {
- };
- 
- #define MIN_IOS        16
--#define MIN_POOL_PAGES 32
- 
- static struct kmem_cache *_crypt_io_pool;
- 
-@@ -951,14 +949,13 @@ static int crypt_convert(struct crypt_co
- 	return 0;
- }
- 
-+static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
-+
- /*
-  * Generate a new unfragmented bio with the given size
-  * This should never violate the device limitations
-- * May return a smaller bio when running out of pages, indicated by
-- * *out_of_pages set to 1.
-  */
--static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
--				      unsigned *out_of_pages)
-+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
- {
- 	struct crypt_config *cc = io->cc;
- 	struct bio *clone;
-@@ -966,41 +963,27 @@ static struct bio *crypt_alloc_buffer(st
- 	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
- 	unsigned i, len;
- 	struct page *page;
-+	struct bio_vec *bvec;
- 
- 	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
- 	if (!clone)
- 		return NULL;
- 
- 	clone_init(io, clone);
--	*out_of_pages = 0;
- 
- 	for (i = 0; i < nr_iovecs; i++) {
- 		page = mempool_alloc(cc->page_pool, gfp_mask);
--		if (!page) {
--			*out_of_pages = 1;
--			break;
--		}
--
--		/*
--		 * If additional pages cannot be allocated without waiting,
--		 * return a partially-allocated bio.  The caller will then try
--		 * to allocate more bios while submitting this partial bio.
--		 */
--		gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
- 
- 		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
- 
--		if (!bio_add_page(clone, page, len, 0)) {
--			mempool_free(page, cc->page_pool);
--			break;
--		}
-+		bvec = &clone->bi_io_vec[clone->bi_vcnt++];
-+		bvec->bv_page = page;
-+		bvec->bv_len = len;
-+		bvec->bv_offset = 0;
- 
--		size -= len;
--	}
-+		clone->bi_iter.bi_size += len;
- 
--	if (!clone->bi_iter.bi_size) {
--		bio_put(clone);
--		return NULL;
-+		size -= len;
- 	}
- 
- 	return clone;
-@@ -1025,7 +1008,6 @@ static void crypt_io_init(struct dm_cryp
- 	io->base_bio = bio;
- 	io->sector = sector;
- 	io->error = 0;
--	io->base_io = NULL;
- 	io->ctx.req = NULL;
- 	atomic_set(&io->io_pending, 0);
- }
-@@ -1038,13 +1020,11 @@ static void crypt_inc_pending(struct dm_
- /*
-  * One of the bios was finished. Check for completion of
-  * the whole request and correctly clean up the buffer.
-- * If base_io is set, wait for the last fragment to complete.
-  */
- static void crypt_dec_pending(struct dm_crypt_io *io)
- {
- 	struct crypt_config *cc = io->cc;
- 	struct bio *base_bio = io->base_bio;
--	struct dm_crypt_io *base_io = io->base_io;
- 	int error = io->error;
- 
- 	if (!atomic_dec_and_test(&io->io_pending))
-@@ -1055,13 +1035,7 @@ static void crypt_dec_pending(struct dm_
- 	if (io != dm_per_bio_data(base_bio, cc->per_bio_data_size))
- 		mempool_free(io, cc->io_pool);
- 
--	if (likely(!base_io))
--		bio_endio(base_bio, error);
--	else {
--		if (error && !base_io->error)
--			base_io->error = error;
--		crypt_dec_pending(base_io);
--	}
-+	bio_endio(base_bio, error);
- }
- 
- /*
-@@ -1197,10 +1171,7 @@ static void kcryptd_crypt_write_convert(
- {
- 	struct crypt_config *cc = io->cc;
- 	struct bio *clone;
--	struct dm_crypt_io *new_io;
- 	int crypt_finished;
--	unsigned out_of_pages = 0;
--	unsigned remaining = io->base_bio->bi_iter.bi_size;
- 	sector_t sector = io->sector;
- 	int r;
- 
-@@ -1210,80 +1181,30 @@ static void kcryptd_crypt_write_convert(
- 	crypt_inc_pending(io);
- 	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
- 
--	/*
--	 * The allocated buffers can be smaller than the whole bio,
--	 * so repeat the whole process until all the data can be handled.
--	 */
--	while (remaining) {
--		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
--		if (unlikely(!clone)) {
--			io->error = -ENOMEM;
--			break;
--		}
--
--		io->ctx.bio_out = clone;
--		io->ctx.iter_out = clone->bi_iter;
--
--		remaining -= clone->bi_iter.bi_size;
--		sector += bio_sectors(clone);
--
--		crypt_inc_pending(io);
--
--		r = crypt_convert(cc, &io->ctx);
--		if (r < 0)
--			io->error = -EIO;
--
--		crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
--
--		/* Encryption was already finished, submit io now */
--		if (crypt_finished) {
--			kcryptd_crypt_write_io_submit(io, 0);
--
--			/*
--			 * If there was an error, do not try next fragments.
--			 * For async, error is processed in async handler.
--			 */
--			if (unlikely(r < 0))
--				break;
-+	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
-+	if (unlikely(!clone)) {
-+		io->error = -EIO;
-+		goto dec;
-+	}
- 
--			io->sector = sector;
--		}
-+	io->ctx.bio_out = clone;
-+	io->ctx.iter_out = clone->bi_iter;
- 
--		/*
--		 * Out of memory -> run queues
--		 * But don't wait if split was due to the io size restriction
--		 */
--		if (unlikely(out_of_pages))
--			congestion_wait(BLK_RW_ASYNC, HZ/100);
-+	sector += bio_sectors(clone);
- 
--		/*
--		 * With async crypto it is unsafe to share the crypto context
--		 * between fragments, so switch to a new dm_crypt_io structure.
--		 */
--		if (unlikely(!crypt_finished && remaining)) {
--			new_io = mempool_alloc(cc->io_pool, GFP_NOIO);
--			crypt_io_init(new_io, io->cc, io->base_bio, sector);
--			crypt_inc_pending(new_io);
--			crypt_convert_init(cc, &new_io->ctx, NULL,
--					   io->base_bio, sector);
--			new_io->ctx.iter_in = io->ctx.iter_in;
--
--			/*
--			 * Fragments after the first use the base_io
--			 * pending count.
--			 */
--			if (!io->base_io)
--				new_io->base_io = io;
--			else {
--				new_io->base_io = io->base_io;
--				crypt_inc_pending(io->base_io);
--				crypt_dec_pending(io);
--			}
-+	crypt_inc_pending(io);
-+	r = crypt_convert(cc, &io->ctx);
-+	if (r)
-+		io->error = -EIO;
-+	crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
- 
--			io = new_io;
--		}
-+	/* Encryption was already finished, submit io now */
-+	if (crypt_finished) {
-+		kcryptd_crypt_write_io_submit(io, 0);
-+		io->sector = sector;
- 	}
- 
-+dec:
- 	crypt_dec_pending(io);
- }
- 
-@@ -1738,7 +1659,7 @@ static int crypt_ctr(struct dm_target *t
- 				sizeof(struct dm_crypt_io) + cc->dmreq_start +
- 				sizeof(struct dm_crypt_request) + cc->iv_size;
- 
--	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
-+	cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
- 	if (!cc->page_pool) {
- 		ti->error = "Cannot allocate page mempool";
- 		goto bad;
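
The patch removed above reworks crypt_alloc_buffer() so the clone bio is sized for the whole request up front: the page count is rounded up from the request size and each bio vector gets min(remaining, PAGE_SIZE) bytes. The userspace C sketch below illustrates only that sizing loop; PAGE_SIZE here is a local constant, and struct buffer_vec / alloc_full_buffer() are invented names for the example, not the kernel API.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u

struct buffer_vec {
	void *page;
	unsigned int len;
};

/* Allocate enough whole pages to cover 'size' bytes in one pass,
 * mirroring the "full request" allocation done by the patch. */
static struct buffer_vec *alloc_full_buffer(unsigned int size, unsigned int *nr_vecs)
{
	unsigned int n = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	struct buffer_vec *vecs = calloc(n, sizeof(*vecs));
	unsigned int i, remaining = size;

	if (!vecs)
		return NULL;

	for (i = 0; i < n; i++) {
		vecs[i].page = malloc(PAGE_SIZE);
		if (!vecs[i].page) {
			while (i--)
				free(vecs[i].page);
			free(vecs);
			return NULL;
		}
		/* Last vector covers only the tail of the request. */
		vecs[i].len = remaining > PAGE_SIZE ? PAGE_SIZE : remaining;
		remaining -= vecs[i].len;
	}
	*nr_vecs = n;
	return vecs;
}

int main(void)
{
	unsigned int n = 0;
	struct buffer_vec *v = alloc_full_buffer(10000, &n);

	if (v) {
		printf("allocated %u page vectors, last len = %u\n",
		       n, v[n - 1].len);
		while (n--)
			free(v[n].page);
		free(v);
	}
	return 0;
}
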
diff --git a/dm-crypt-fix-allocation-deadlock.patch b/dm-crypt-fix-allocation-deadlock.patch
deleted file mode 100644
index 80994da..0000000
--- a/dm-crypt-fix-allocation-deadlock.patch
+++ /dev/null
@@ -1,114 +0,0 @@
-dm-crypt: avoid deadlock in mempools
-
-This patch fixes a theoretical deadlock introduced in the previous patch.
-
-The function crypt_alloc_buffer may be called concurrently. If we allocate
-from the mempool concurrently, there is a possibility of deadlock.
-For example, if we have a mempool of 256 pages and two processes, each
-wanting 256 pages, allocate from it concurrently, they may deadlock in a
-situation where both have allocated 128 pages and the mempool is exhausted.
-
-In order to avoid this scenario, we allocate the pages under a mutex.
-
-In order not to degrade performance with excessive locking, we try
-non-blocking allocations without a mutex first, and if that fails, we fall
-back to a blocking allocation with a mutex.
-
-Signed-off-by: Mikulas Patocka <mpatocka at redhat.com>
-
----
- drivers/md/dm-crypt.c |   41 ++++++++++++++++++++++++++++++++++++-----
- 1 file changed, 36 insertions(+), 5 deletions(-)
-
-Index: linux-3.14/drivers/md/dm-crypt.c
-===================================================================
---- linux-3.14.orig/drivers/md/dm-crypt.c	2014-04-04 20:59:46.000000000 +0200
-+++ linux-3.14/drivers/md/dm-crypt.c	2014-04-04 21:04:40.000000000 +0200
-@@ -124,6 +124,7 @@ struct crypt_config {
- 	mempool_t *req_pool;
- 	mempool_t *page_pool;
- 	struct bio_set *bs;
-+	struct mutex bio_alloc_lock;
- 
- 	struct workqueue_struct *io_queue;
- 	struct workqueue_struct *crypt_queue;
-@@ -954,27 +955,51 @@ static void crypt_free_buffer_pages(stru
- /*
-  * Generate a new unfragmented bio with the given size
-  * This should never violate the device limitations
-+ *
-+ * This function may be called concurrently. If we allocate from the mempool
-+ * concurrently, there is a possibility of deadlock. For example, if we have
-+ * a mempool of 256 pages and two processes, each wanting 256 pages, allocate
-+ * from it concurrently, they may deadlock in a situation where both have
-+ * allocated 128 pages and the mempool is exhausted.
-+ *
-+ * In order to avoid this scenario, we allocate the pages under a mutex.
-+ *
-+ * In order not to degrade performance with excessive locking, we try
-+ * non-blocking allocations without a mutex first, and if that fails, we fall
-+ * back to a blocking allocation with a mutex.
-  */
- static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
- {
- 	struct crypt_config *cc = io->cc;
- 	struct bio *clone;
- 	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
--	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
--	unsigned i, len;
-+	gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
-+	unsigned i, len, remaining_size;
- 	struct page *page;
- 	struct bio_vec *bvec;
- 
-+retry:
-+	if (unlikely(gfp_mask & __GFP_WAIT))
-+		mutex_lock(&cc->bio_alloc_lock);
-+
- 	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
- 	if (!clone)
--		return NULL;
-+		goto return_clone;
- 
- 	clone_init(io, clone);
- 
-+	remaining_size = size;
-+
- 	for (i = 0; i < nr_iovecs; i++) {
- 		page = mempool_alloc(cc->page_pool, gfp_mask);
-+		if (!page) {
-+			crypt_free_buffer_pages(cc, clone);
-+			bio_put(clone);
-+			gfp_mask |= __GFP_WAIT;
-+			goto retry;
-+		}
- 
--		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
-+		len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
- 
- 		bvec = &clone->bi_io_vec[clone->bi_vcnt++];
- 		bvec->bv_page = page;
-@@ -983,9 +1008,13 @@ static struct bio *crypt_alloc_buffer(st
- 
- 		clone->bi_iter.bi_size += len;
- 
--		size -= len;
-+		remaining_size -= len;
- 	}
- 
-+return_clone:
-+	if (unlikely(gfp_mask & __GFP_WAIT))
-+		mutex_unlock(&cc->bio_alloc_lock);
-+
- 	return clone;
- }
- 
-@@ -1671,6 +1700,8 @@ static int crypt_ctr(struct dm_target *t
- 		goto bad;
- 	}
- 
-+	mutex_init(&cc->bio_alloc_lock);
-+
- 	ret = -EINVAL;
- 	if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
- 		ti->error = "Invalid iv_offset sector";
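
The deadlock-avoidance patch above boils down to a two-pass allocation: try the whole buffer with non-blocking gets first, and if the pool runs dry, return everything grabbed so far, take a mutex and retry with blocking gets, so only one request at a time can wait on the shared pool. Below is a hedged userspace analogue of that control flow, using pthreads and a toy counter in place of the page mempool; every name in it is invented for the sketch.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define POOL_SIZE 8	/* toy pool: 8 pages total      */
#define NR_PAGES  6	/* pages one request needs      */

/* A toy page pool standing in for cc->page_pool. */
static int pool_free_pages = POOL_SIZE;
static pthread_mutex_t pool_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  pool_cv  = PTHREAD_COND_INITIALIZER;

static bool pool_get(bool may_block)
{
	bool ok = true;

	pthread_mutex_lock(&pool_mtx);
	if (may_block)
		while (pool_free_pages == 0)
			pthread_cond_wait(&pool_cv, &pool_mtx);
	if (pool_free_pages > 0)
		pool_free_pages--;
	else
		ok = false;
	pthread_mutex_unlock(&pool_mtx);
	return ok;
}

static void pool_put(void)
{
	pthread_mutex_lock(&pool_mtx);
	pool_free_pages++;
	pthread_cond_signal(&pool_cv);
	pthread_mutex_unlock(&pool_mtx);
}

/* Serializes the blocking pass, like cc->bio_alloc_lock in the patch. */
static pthread_mutex_t alloc_serialize = PTHREAD_MUTEX_INITIALIZER;

static void alloc_request_pages(void)
{
	bool blocking = false;
	int i;

retry:
	if (blocking)
		pthread_mutex_lock(&alloc_serialize);

	for (i = 0; i < NR_PAGES; i++) {
		if (!pool_get(blocking)) {
			/* Pool ran dry: give back the partial allocation and
			 * retry under the mutex with blocking gets. */
			while (i--)
				pool_put();
			blocking = true;
			goto retry;
		}
	}

	if (blocking)
		pthread_mutex_unlock(&alloc_serialize);
}

int main(void)
{
	alloc_request_pages();
	printf("pages left in pool: %d\n", pool_free_pages);
	return 0;
}
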
diff --git a/dm-crypt-offload-writes-to-thread.patch b/dm-crypt-offload-writes-to-thread.patch
deleted file mode 100644
index 8fdec56..0000000
--- a/dm-crypt-offload-writes-to-thread.patch
+++ /dev/null
@@ -1,232 +0,0 @@
-dm-crypt: offload writes to thread
-
-Submitting write bios directly from the encryption thread caused serious
-performance degradation. On a multiprocessor machine, encryption requests
-finish in a different order than they were submitted. Consequently, write
-requests would also be submitted out of order, which could cause severe
-performance degradation.
-
-This patch moves submitting write requests to a separate thread so that
-the requests can be sorted before submitting.
-
-Sorting is implemented in the next patch.
-
-Note: the previous patch "dm-crypt: don't allocate pages for a partial
-request" must be applied before this one. Without it, this patch could
-introduce a crash.
-
-Signed-off-by: Mikulas Patocka <mpatocka at redhat.com>
-
----
- drivers/md/dm-crypt.c |  120 ++++++++++++++++++++++++++++++++++++++++----------
- 1 file changed, 97 insertions(+), 23 deletions(-)
-
-Index: linux-3.14/drivers/md/dm-crypt.c
-===================================================================
---- linux-3.14.orig/drivers/md/dm-crypt.c	2014-04-04 21:05:40.000000000 +0200
-+++ linux-3.14/drivers/md/dm-crypt.c	2014-04-04 21:06:22.000000000 +0200
-@@ -18,6 +18,7 @@
- #include <linux/slab.h>
- #include <linux/crypto.h>
- #include <linux/workqueue.h>
-+#include <linux/kthread.h>
- #include <linux/backing-dev.h>
- #include <linux/atomic.h>
- #include <linux/scatterlist.h>
-@@ -58,6 +59,8 @@ struct dm_crypt_io {
- 	atomic_t io_pending;
- 	int error;
- 	sector_t sector;
-+
-+	struct list_head list;
- } CRYPTO_MINALIGN_ATTR;
- 
- struct dm_crypt_request {
-@@ -128,6 +131,10 @@ struct crypt_config {
- 	struct workqueue_struct *io_queue;
- 	struct workqueue_struct *crypt_queue;
- 
-+	struct task_struct *write_thread;
-+	wait_queue_head_t write_thread_wait;
-+	struct list_head write_thread_list;
-+
- 	char *cipher;
- 	char *cipher_string;
- 
-@@ -1141,37 +1148,89 @@ static int kcryptd_io_read(struct dm_cry
- 	return 0;
- }
- 
-+static void kcryptd_io_read_work(struct work_struct *work)
-+{
-+	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
-+
-+	crypt_inc_pending(io);
-+	if (kcryptd_io_read(io, GFP_NOIO))
-+		io->error = -ENOMEM;
-+	crypt_dec_pending(io);
-+}
-+
-+static void kcryptd_queue_read(struct dm_crypt_io *io)
-+{
-+	struct crypt_config *cc = io->cc;
-+
-+	INIT_WORK(&io->work, kcryptd_io_read_work);
-+	queue_work(cc->io_queue, &io->work);
-+}
-+
- static void kcryptd_io_write(struct dm_crypt_io *io)
- {
- 	struct bio *clone = io->ctx.bio_out;
-+
- 	generic_make_request(clone);
- }
- 
--static void kcryptd_io(struct work_struct *work)
-+static int dmcrypt_write(void *data)
- {
--	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
-+	struct crypt_config *cc = data;
-+	while (1) {
-+		struct list_head local_list;
-+		struct blk_plug plug;
- 
--	if (bio_data_dir(io->base_bio) == READ) {
--		crypt_inc_pending(io);
--		if (kcryptd_io_read(io, GFP_NOIO))
--			io->error = -ENOMEM;
--		crypt_dec_pending(io);
--	} else
--		kcryptd_io_write(io);
--}
-+		DECLARE_WAITQUEUE(wait, current);
- 
--static void kcryptd_queue_io(struct dm_crypt_io *io)
--{
--	struct crypt_config *cc = io->cc;
-+		spin_lock_irq(&cc->write_thread_wait.lock);
-+continue_locked:
- 
--	INIT_WORK(&io->work, kcryptd_io);
--	queue_work(cc->io_queue, &io->work);
-+		if (!list_empty(&cc->write_thread_list))
-+			goto pop_from_list;
-+
-+		__set_current_state(TASK_INTERRUPTIBLE);
-+		__add_wait_queue(&cc->write_thread_wait, &wait);
-+
-+		spin_unlock_irq(&cc->write_thread_wait.lock);
-+
-+		if (unlikely(kthread_should_stop())) {
-+			set_task_state(current, TASK_RUNNING);
-+			remove_wait_queue(&cc->write_thread_wait, &wait);
-+			break;
-+		}
-+
-+		schedule();
-+
-+		set_task_state(current, TASK_RUNNING);
-+		spin_lock_irq(&cc->write_thread_wait.lock);
-+		__remove_wait_queue(&cc->write_thread_wait, &wait);
-+		goto continue_locked;
-+
-+pop_from_list:
-+		local_list = cc->write_thread_list;
-+		local_list.next->prev = &local_list;
-+		local_list.prev->next = &local_list;
-+		INIT_LIST_HEAD(&cc->write_thread_list);
-+
-+		spin_unlock_irq(&cc->write_thread_wait.lock);
-+
-+		blk_start_plug(&plug);
-+		do {
-+			struct dm_crypt_io *io = container_of(local_list.next,
-+						struct dm_crypt_io, list);
-+			list_del(&io->list);
-+			kcryptd_io_write(io);
-+		} while (!list_empty(&local_list));
-+		blk_finish_plug(&plug);
-+	}
-+	return 0;
- }
- 
--static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
-+static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io)
- {
- 	struct bio *clone = io->ctx.bio_out;
- 	struct crypt_config *cc = io->cc;
-+	unsigned long flags;
- 
- 	if (unlikely(io->error < 0)) {
- 		crypt_free_buffer_pages(cc, clone);
-@@ -1185,10 +1244,10 @@ static void kcryptd_crypt_write_io_submi
- 
- 	clone->bi_iter.bi_sector = cc->start + io->sector;
- 
--	if (async)
--		kcryptd_queue_io(io);
--	else
--		generic_make_request(clone);
-+	spin_lock_irqsave(&cc->write_thread_wait.lock, flags);
-+	list_add_tail(&io->list, &cc->write_thread_list);
-+	wake_up_locked(&cc->write_thread_wait);
-+	spin_unlock_irqrestore(&cc->write_thread_wait.lock, flags);
- }
- 
- static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
-@@ -1224,7 +1283,7 @@ static void kcryptd_crypt_write_convert(
- 
- 	/* Encryption was already finished, submit io now */
- 	if (crypt_finished) {
--		kcryptd_crypt_write_io_submit(io, 0);
-+		kcryptd_crypt_write_io_submit(io);
- 		io->sector = sector;
- 	}
- 
-@@ -1284,7 +1343,7 @@ static void kcryptd_async_done(struct cr
- 	if (bio_data_dir(io->base_bio) == READ)
- 		kcryptd_crypt_read_done(io);
- 	else
--		kcryptd_crypt_write_io_submit(io, 1);
-+		kcryptd_crypt_write_io_submit(io);
- }
- 
- static void kcryptd_crypt(struct work_struct *work)
-@@ -1431,6 +1490,9 @@ static void crypt_dtr(struct dm_target *
- 	if (!cc)
- 		return;
- 
-+	if (cc->write_thread)
-+		kthread_stop(cc->write_thread);
-+
- 	if (cc->io_queue)
- 		destroy_workqueue(cc->io_queue);
- 	if (cc->crypt_queue)
-@@ -1745,6 +1807,18 @@ static int crypt_ctr(struct dm_target *t
- 		goto bad;
- 	}
- 
-+	init_waitqueue_head(&cc->write_thread_wait);
-+	INIT_LIST_HEAD(&cc->write_thread_list);
-+
-+	cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
-+	if (IS_ERR(cc->write_thread)) {
-+		ret = PTR_ERR(cc->write_thread);
-+		cc->write_thread = NULL;
-+		ti->error = "Couldn't spawn write thread";
-+		goto bad;
-+	}
-+	wake_up_process(cc->write_thread);
-+
- 	ti->num_flush_bios = 1;
- 	ti->discard_zeroes_data_unsupported = true;
- 
-@@ -1779,7 +1853,7 @@ static int crypt_map(struct dm_target *t
- 
- 	if (bio_data_dir(io->base_bio) == READ) {
- 		if (kcryptd_io_read(io, GFP_NOWAIT))
--			kcryptd_queue_io(io);
-+			kcryptd_queue_read(io);
- 	} else
- 		kcryptd_queue_crypt(io);
- 
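
The write-offload patch above adds a single dmcrypt_write kthread: encryption workers append finished writes to a shared list under the waitqueue lock and wake the thread, which splices the whole list out and issues the bios from one context. The following is a rough userspace analogue of that hand-off; struct pending_io, queue_write() and write_thread() are made-up names, and pthread primitives stand in for the kernel's waitqueue and kthread.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* A pending "write", standing in for struct dm_crypt_io. */
struct pending_io {
	int sector;
	struct pending_io *next;
};

static struct pending_io *write_list;		/* shared queue            */
static pthread_mutex_t write_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  write_wake = PTHREAD_COND_INITIALIZER;
static int done;				/* producer finished flag  */

/* Producer side: rough equivalent of queueing from the submit path. */
static void queue_write(struct pending_io *io)
{
	pthread_mutex_lock(&write_lock);
	io->next = write_list;
	write_list = io;
	pthread_cond_signal(&write_wake);
	pthread_mutex_unlock(&write_lock);
}

/* Consumer side: rough equivalent of the dmcrypt_write() thread.  It takes
 * the whole list in one go and submits outside the lock. */
static void *write_thread(void *arg)
{
	(void)arg;
	for (;;) {
		struct pending_io *local;
		int finished;

		pthread_mutex_lock(&write_lock);
		while (!write_list && !done)
			pthread_cond_wait(&write_wake, &write_lock);
		local = write_list;
		write_list = NULL;
		finished = done;
		pthread_mutex_unlock(&write_lock);

		while (local) {
			struct pending_io *io = local;

			local = io->next;
			printf("submitting write at sector %d\n", io->sector);
			free(io);
		}
		if (finished)
			return NULL;
	}
}

int main(void)
{
	pthread_t tid;
	int i;

	pthread_create(&tid, NULL, write_thread, NULL);
	for (i = 0; i < 4; i++) {
		struct pending_io *io = malloc(sizeof(*io));

		io->sector = i * 8;
		queue_write(io);
	}
	pthread_mutex_lock(&write_lock);
	done = 1;
	pthread_cond_signal(&write_wake);
	pthread_mutex_unlock(&write_lock);
	pthread_join(tid, NULL);
	return 0;
}
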
diff --git a/dm-crypt-remove-io-pool.patch b/dm-crypt-remove-io-pool.patch
deleted file mode 100644
index 1ad444b..0000000
--- a/dm-crypt-remove-io-pool.patch
+++ /dev/null
@@ -1,94 +0,0 @@
-dm-crypt: remove io_pool
-
-Remove io_pool and _crypt_io_pool because they are unused.
-
-Signed-off-by: Mikulas Patocka <mpatocka at redhat.com>
-
----
- drivers/md/dm-crypt.c |   21 +--------------------
- 1 file changed, 1 insertion(+), 20 deletions(-)
-
-Index: linux-3.14/drivers/md/dm-crypt.c
-===================================================================
---- linux-3.14.orig/drivers/md/dm-crypt.c	2014-04-04 21:04:40.000000000 +0200
-+++ linux-3.14/drivers/md/dm-crypt.c	2014-04-04 21:05:40.000000000 +0200
-@@ -120,7 +120,6 @@ struct crypt_config {
- 	 * pool for per bio private data, crypto requests and
- 	 * encryption requeusts/buffer pages
- 	 */
--	mempool_t *io_pool;
- 	mempool_t *req_pool;
- 	mempool_t *page_pool;
- 	struct bio_set *bs;
-@@ -173,8 +172,6 @@ struct crypt_config {
- 
- #define MIN_IOS        16
- 
--static struct kmem_cache *_crypt_io_pool;
--
- static void clone_init(struct dm_crypt_io *, struct bio *);
- static void kcryptd_queue_crypt(struct dm_crypt_io *io);
- static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
-@@ -1061,8 +1058,6 @@ static void crypt_dec_pending(struct dm_
- 
- 	if (io->ctx.req)
- 		crypt_free_req(cc, io->ctx.req, base_bio);
--	if (io != dm_per_bio_data(base_bio, cc->per_bio_data_size))
--		mempool_free(io, cc->io_pool);
- 
- 	bio_endio(base_bio, error);
- }
-@@ -1450,8 +1445,6 @@ static void crypt_dtr(struct dm_target *
- 		mempool_destroy(cc->page_pool);
- 	if (cc->req_pool)
- 		mempool_destroy(cc->req_pool);
--	if (cc->io_pool)
--		mempool_destroy(cc->io_pool);
- 
- 	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
- 		cc->iv_gen_ops->dtr(cc);
-@@ -1658,13 +1658,6 @@
- 	if (ret < 0)
- 		goto bad;
- 
--	ret = -ENOMEM;
--	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
--	if (!cc->io_pool) {
--		ti->error = "Cannot allocate crypt io mempool";
--		goto bad;
--	}
--
- 	cc->dmreq_start = sizeof(struct ablkcipher_request);
- 	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
- 	cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
-@@ -1682,6 +1675,7 @@
- 		iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
- 	}
- 
-+	ret = -ENOMEM;
- 	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
- 			sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
- 	if (!cc->req_pool) {
-@@ -1938,14 +1925,9 @@ static int __init dm_crypt_init(void)
- {
- 	int r;
- 
--	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
--	if (!_crypt_io_pool)
--		return -ENOMEM;
--
- 	r = dm_register_target(&crypt_target);
- 	if (r < 0) {
- 		DMERR("register failed %d", r);
--		kmem_cache_destroy(_crypt_io_pool);
- 	}
- 
- 	return r;
-@@ -1954,7 +1936,6 @@ static int __init dm_crypt_init(void)
- static void __exit dm_crypt_exit(void)
- {
- 	dm_unregister_target(&crypt_target);
--	kmem_cache_destroy(_crypt_io_pool);
- }
- 
- module_init(dm_crypt_init);
diff --git a/dm-crypt-sort-requests.patch b/dm-crypt-sort-requests.patch
deleted file mode 100644
index 5dc9886..0000000
--- a/dm-crypt-sort-requests.patch
+++ /dev/null
@@ -1,137 +0,0 @@
-dm-crypt: sort writes
-
-Write requests are sorted in a red-black tree structure and are submitted
-in sorted order.
-
-In theory the sorting should be performed by the underlying disk scheduler;
-however, in practice the disk scheduler accepts and sorts only 128 requests.
-In order to sort more requests, we need to implement our own sorting.
-
-Signed-off-by: Mikulas Patocka <mpatocka at redhat.com>
-
----
- drivers/md/dm-crypt.c |   50 +++++++++++++++++++++++++++++++++++---------------
- 1 file changed, 35 insertions(+), 15 deletions(-)
-
-Index: linux-3.14/drivers/md/dm-crypt.c
-===================================================================
---- linux-3.14.orig/drivers/md/dm-crypt.c	2014-04-04 21:06:22.000000000 +0200
-+++ linux-3.14/drivers/md/dm-crypt.c	2014-04-04 21:06:55.000000000 +0200
-@@ -22,6 +22,7 @@
- #include <linux/backing-dev.h>
- #include <linux/atomic.h>
- #include <linux/scatterlist.h>
-+#include <linux/rbtree.h>
- #include <asm/page.h>
- #include <asm/unaligned.h>
- #include <crypto/hash.h>
-@@ -60,7 +61,7 @@ struct dm_crypt_io {
- 	int error;
- 	sector_t sector;
- 
--	struct list_head list;
-+	struct rb_node rb_node;
- } CRYPTO_MINALIGN_ATTR;
- 
- struct dm_crypt_request {
-@@ -133,7 +134,7 @@ struct crypt_config {
- 
- 	struct task_struct *write_thread;
- 	wait_queue_head_t write_thread_wait;
--	struct list_head write_thread_list;
-+	struct rb_root write_tree;
- 
- 	char *cipher;
- 	char *cipher_string;
-@@ -1177,7 +1178,7 @@ static int dmcrypt_write(void *data)
- {
- 	struct crypt_config *cc = data;
- 	while (1) {
--		struct list_head local_list;
-+		struct rb_root write_tree;
- 		struct blk_plug plug;
- 
- 		DECLARE_WAITQUEUE(wait, current);
-@@ -1185,7 +1186,7 @@ static int dmcrypt_write(void *data)
- 		spin_lock_irq(&cc->write_thread_wait.lock);
- continue_locked:
- 
--		if (!list_empty(&cc->write_thread_list))
-+		if (!RB_EMPTY_ROOT(&cc->write_tree))
- 			goto pop_from_list;
- 
- 		__set_current_state(TASK_INTERRUPTIBLE);
-@@ -1207,20 +1208,23 @@ continue_locked:
- 		goto continue_locked;
- 
- pop_from_list:
--		local_list = cc->write_thread_list;
--		local_list.next->prev = &local_list;
--		local_list.prev->next = &local_list;
--		INIT_LIST_HEAD(&cc->write_thread_list);
--
-+		write_tree = cc->write_tree;
-+		cc->write_tree = RB_ROOT;
- 		spin_unlock_irq(&cc->write_thread_wait.lock);
- 
-+		BUG_ON(rb_parent(write_tree.rb_node));
-+
-+		/*
-+		 * Note: we cannot walk the tree here with rb_next because
-+		 * the structures may be freed when kcryptd_io_write is called.
-+		 */
- 		blk_start_plug(&plug);
- 		do {
--			struct dm_crypt_io *io = container_of(local_list.next,
--						struct dm_crypt_io, list);
--			list_del(&io->list);
-+			struct dm_crypt_io *io = rb_entry(rb_first(&write_tree),
-+						struct dm_crypt_io, rb_node);
-+			rb_erase(&io->rb_node, &write_tree);
- 			kcryptd_io_write(io);
--		} while (!list_empty(&local_list));
-+		} while (!RB_EMPTY_ROOT(&write_tree));
- 		blk_finish_plug(&plug);
- 	}
- 	return 0;
-@@ -1231,6 +1235,8 @@ static void kcryptd_crypt_write_io_submi
- 	struct bio *clone = io->ctx.bio_out;
- 	struct crypt_config *cc = io->cc;
- 	unsigned long flags;
-+	sector_t sector;
-+	struct rb_node **p, *parent;
- 
- 	if (unlikely(io->error < 0)) {
- 		crypt_free_buffer_pages(cc, clone);
-@@ -1245,7 +1251,21 @@ static void kcryptd_crypt_write_io_submi
- 	clone->bi_iter.bi_sector = cc->start + io->sector;
- 
- 	spin_lock_irqsave(&cc->write_thread_wait.lock, flags);
--	list_add_tail(&io->list, &cc->write_thread_list);
-+	p = &cc->write_tree.rb_node;
-+	parent = NULL;
-+	sector = io->sector;
-+	while (*p) {
-+		parent = *p;
-+#define io_node rb_entry(parent, struct dm_crypt_io, rb_node)
-+		if (sector < io_node->sector)
-+			p = &io_node->rb_node.rb_left;
-+		else
-+			p = &io_node->rb_node.rb_right;
-+#undef io_node
-+	}
-+	rb_link_node(&io->rb_node, parent, p);
-+	rb_insert_color(&io->rb_node, &cc->write_tree);
-+
- 	wake_up_locked(&cc->write_thread_wait);
- 	spin_unlock_irqrestore(&cc->write_thread_wait.lock, flags);
- }
-@@ -1808,7 +1828,7 @@ static int crypt_ctr(struct dm_target *t
- 	}
- 
- 	init_waitqueue_head(&cc->write_thread_wait);
--	INIT_LIST_HEAD(&cc->write_thread_list);
-+	cc->write_tree = RB_ROOT;
- 
- 	cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
- 	if (IS_ERR(cc->write_thread)) {
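
The sort-requests patch above keys each pending write by io->sector in an rb-tree and drains it with rb_first(), so bios reach the block layer in ascending sector order. The sketch below illustrates only that ordering idea with a sorted linked list rather than an rb-tree; the kernel code uses rb_link_node()/rb_insert_color() as shown in the hunk above, and the names here are invented.

#include <stdio.h>
#include <stdlib.h>

struct pending_io {
	unsigned long sector;
	struct pending_io *next;
};

/* Insert keyed by sector so draining the list later submits writes in
 * ascending order.  Equal sectors go after existing entries, matching the
 * ">= goes right" rule in the rb-tree insertion above. */
static void insert_sorted(struct pending_io **head, struct pending_io *io)
{
	struct pending_io **p = head;

	while (*p && (*p)->sector <= io->sector)
		p = &(*p)->next;
	io->next = *p;
	*p = io;
}

int main(void)
{
	static const unsigned long sectors[] = { 4096, 8, 1024, 16, 512 };
	struct pending_io *head = NULL, *io;
	size_t i;

	for (i = 0; i < sizeof(sectors) / sizeof(sectors[0]); i++) {
		io = malloc(sizeof(*io));
		io->sector = sectors[i];
		insert_sorted(&head, io);
	}
	while (head) {
		io = head;
		head = io->next;
		printf("submit sector %lu\n", io->sector);
		free(io);
	}
	return 0;
}
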
diff --git a/dm-crypt-unbound-workqueue.patch b/dm-crypt-unbound-workqueue.patch
deleted file mode 100644
index 0a0a0ad..0000000
--- a/dm-crypt-unbound-workqueue.patch
+++ /dev/null
@@ -1,24 +0,0 @@
-dm-crypt: use unbound workqueue for request processing
-
-Use an unbound workqueue so that work is automatically balanced between
-available CPUs.
-
-Signed-off-by: Mikulas Patocka <mpatocka at redhat.com>
-
----
- drivers/md/dm-crypt.c |    2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-Index: linux-3.14-rc8/drivers/md/dm-crypt.c
-===================================================================
---- linux-3.14-rc8.orig/drivers/md/dm-crypt.c	2014-03-25 22:57:00.000000000 +0100
-+++ linux-3.14-rc8/drivers/md/dm-crypt.c	2014-03-28 17:09:14.000000000 +0100
-@@ -1800,7 +1800,7 @@ static int crypt_ctr(struct dm_target *t
- 	}
- 
- 	cc->crypt_queue = alloc_workqueue("kcryptd",
--					  WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
-+					  WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus());
- 	if (!cc->crypt_queue) {
- 		ti->error = "Couldn't create kcryptd queue";
- 		goto bad;
================================================================

---- gitweb:

http://git.pld-linux.org/gitweb.cgi/packages/kernel.git/commitdiff/67ed654c0c1b8b72767d538b14969b2174fe70c9


