<html><head><meta name="color-scheme" content="light dark"></head><body><pre style="word-wrap: break-word; white-space: pre-wrap;">---
 drivers/md/dm-memcache.c |  292 +++++++++++++++++++++++++++++++++++++++++++++++
 drivers/md/dm-memcache.h |   57 +++++++++
 2 files changed, 349 insertions(+)

Index: linux/drivers/md/dm-memcache.c
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ linux/drivers/md/dm-memcache.c	2007-06-06 20:40:08.000000000 +0100
@@ -0,0 +1,292 @@
+/*
+ * Copyright (C) 2006,2007 Red Hat GmbH
+ *
+ * Module Author: Heinz Mauelshagen (Mauelshagen@RedHat.com)
+ *
+ * Allocate/free total_pages to a per client page pool.
+ * Allocate/free memory objects with chunks (1..n) of pages_per_chunk pages
+ * hanging off.
+ *
+ * This file is released under the GPL.
+ */
+
+#define	DM_MEM_CACHE_VERSION	"0.2"
+
+#include "dm.h"
+#include "dm-io.h"
+#include "dm-memcache.h"
+
struct dm_memcache_client {
	spinlock_t lock;		/* protects free_list, free_pages, total_pages, objects */
	mempool_t *objs_pool;		/* pool of dm_memcache_object arrays (one array per object) */
	struct page_list *free_list;	/* singly-linked list of currently unused pages */
	unsigned objects;		/* number of objects the mempool is sized for */
	unsigned chunks;		/* chunks hanging off each memory object */
	unsigned free_pages;		/* pages currently on free_list */
	unsigned total_pages;		/* pages owned by this client in total */
};
+
+/*
+ * Free pages and page_list elements of client.
+ */
+static void free_cache_pages(struct page_list *list)
+{
+	while (list) {
+		struct page_list *pl = list;
+
+		list = pl-&gt;next;
+		BUG_ON(!pl-&gt;page);
+		__free_page(pl-&gt;page);
+		kfree(pl);
+	}
+}
+
+/*
+ * Alloc number of pages and page_list elements as required by client.
+ */
+static struct page_list *alloc_cache_pages(unsigned pages)
+{
+	struct page_list *pl, *ret = NULL;
+	struct page *page;
+
+	while (pages--) {
+		page = alloc_page(GFP_NOIO);
+		if (!page)
+			goto err;
+
+		pl = kmalloc(sizeof(*pl), GFP_NOIO);
+		if (!pl) {
+			__free_page(page);
+			goto err;
+		}
+
+		pl-&gt;page = page;
+		pl-&gt;next = ret;
+		ret = pl;
+	}
+
+	return ret;
+
+   err:
+	free_cache_pages(ret);
+	return NULL;
+}
+
+/*
+ * Allocate page_list elements from the pool to chunks of the mem object
+ */
+static void alloc_chunks(struct dm_memcache_client *cl,
+			 struct dm_memcache_object *obj,
+			 unsigned pages_per_chunk)
+{
+	unsigned chunks = cl-&gt;chunks;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	local_irq_disable();
+	while (chunks--) {
+		unsigned p = pages_per_chunk;
+
+		obj[chunks].pl = NULL;
+
+		while (p--) {
+			struct page_list *pl;
+
+			/* Take next element from free list */
+			spin_lock(&amp;cl-&gt;lock);
+			pl = cl-&gt;free_list;
+			BUG_ON(!pl);
+			cl-&gt;free_list = pl-&gt;next;
+			spin_unlock(&amp;cl-&gt;lock);
+
+			pl-&gt;next = obj[chunks].pl;
+			obj[chunks].pl = pl;
+		}
+	}
+
+	local_irq_restore(flags);
+}
+
+/*
+ * Free page_list elements putting them back onto free list
+ */
+static void free_chunks(struct dm_memcache_client *cl,
+			struct dm_memcache_object *obj)
+{
+	unsigned chunks = cl-&gt;chunks;
+	unsigned long flags;
+	struct page_list *next, *pl;
+
+	local_irq_save(flags);
+	local_irq_disable();
+	while (chunks--) {
+		for (pl = obj[chunks].pl; pl; pl = next) {
+			next = pl-&gt;next;
+
+			spin_lock(&amp;cl-&gt;lock);
+			pl-&gt;next = cl-&gt;free_list;
+			cl-&gt;free_list = pl;
+			cl-&gt;free_pages++;
+			spin_unlock(&amp;cl-&gt;lock);
+		}
+	}
+
+	local_irq_restore(flags);
+}
+
+/*
+ * Create/destroy dm memory cache client resources.
+ */
+struct dm_memcache_client *
+dm_memcache_client_create(unsigned total_pages, unsigned objects,
+			   unsigned chunks)
+{
+	struct dm_memcache_client *client;
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return ERR_PTR(-ENOMEM);
+
+	client-&gt;objs_pool = mempool_create_kmalloc_pool(objects, chunks * sizeof(struct dm_memcache_object));
+	if (!client-&gt;objs_pool)
+		goto err;
+
+	client-&gt;free_list = alloc_cache_pages(total_pages);
+	if (!client-&gt;free_list)
+		goto err1;
+
+	spin_lock_init(&amp;client-&gt;lock);
+	client-&gt;objects = objects;
+	client-&gt;chunks = chunks;
+	client-&gt;free_pages = client-&gt;total_pages = total_pages;
+	return client;
+
+   err1:
+	mempool_destroy(client-&gt;objs_pool);
+   err:
+	kfree(client);
+	return ERR_PTR(-ENOMEM);
+}
+
+void dm_memcache_client_destroy(struct dm_memcache_client *cl)
+{
+	BUG_ON(cl-&gt;free_pages != cl-&gt;total_pages);
+	free_cache_pages(cl-&gt;free_list);
+	mempool_destroy(cl-&gt;objs_pool);
+	kfree(cl);
+}
+
+/*
+ * Grow a clients cache by an amount of pages.
+ *
+ * Don't call from interrupt context!
+ */
+int dm_memcache_grow(struct dm_memcache_client *cl,
+		      unsigned pages_per_chunk)
+{
+	unsigned pages = cl-&gt;chunks * pages_per_chunk;
+	struct page_list *pl = alloc_cache_pages(pages), *last = pl;
+
+	if (!pl)
+		return -ENOMEM;
+
+	while (last-&gt;next)
+		last = last-&gt;next;
+
+	spin_lock_irq(&amp;cl-&gt;lock);
+	last-&gt;next = cl-&gt;free_list;
+	cl-&gt;free_list = pl;
+	cl-&gt;free_pages += pages;
+	cl-&gt;total_pages += pages;
+	cl-&gt;objects++;
+	spin_unlock_irq(&amp;cl-&gt;lock);
+
+	mempool_resize(cl-&gt;objs_pool, cl-&gt;objects, GFP_NOIO);
+	return 0;
+}
+
+/* Shrink a clients cache by an amount of pages */
+int dm_memcache_shrink(struct dm_memcache_client *cl,
+			unsigned pages_per_chunk)
+{
+	int r = 0;
+	unsigned pages = cl-&gt;chunks * pages_per_chunk, p = pages;
+	unsigned long flags;
+	struct page_list *last = NULL, *pl, *pos;
+
+	spin_lock_irqsave(&amp;cl-&gt;lock, flags);
+	pl = pos = cl-&gt;free_list;
+	while (p-- &amp;&amp; pos-&gt;next) {
+		last = pos;
+		pos = pos-&gt;next;
+	}
+
+	if (++p)
+		r = -ENOMEM;
+	else {
+		cl-&gt;free_list = pos;
+		cl-&gt;free_pages -= pages;
+		cl-&gt;total_pages -= pages;
+		cl-&gt;objects--;
+		last-&gt;next = NULL;
+	}
+	spin_unlock_irqrestore(&amp;cl-&gt;lock, flags);
+
+	if (!r) {
+		free_cache_pages(pl);
+		mempool_resize(cl-&gt;objs_pool, cl-&gt;objects, GFP_NOIO);
+	}
+
+	return r;
+}
+
+/*
+ * Allocate/free a memory object
+ *
+ * Can be called from interrupt context
+ */
+struct dm_memcache_object *dm_memcache_alloc(struct dm_memcache_client *cl,
+					       unsigned pages_per_chunk)
+{
+	int r = 0;
+	unsigned pages = cl-&gt;chunks * pages_per_chunk;
+	unsigned long flags;
+	struct dm_memcache_object *obj;
+
+	obj = mempool_alloc(cl-&gt;objs_pool, GFP_NOIO);
+	if (!obj)
+		return ERR_PTR(-ENOMEM);
+
+	spin_lock_irqsave(&amp;cl-&gt;lock, flags);
+	if (pages &gt; cl-&gt;free_pages)
+		r = -ENOMEM;
+	else
+		cl-&gt;free_pages -= pages;
+	spin_unlock_irqrestore(&amp;cl-&gt;lock, flags);
+
+	if (r) {
+		mempool_free(obj, cl-&gt;objs_pool);
+		return ERR_PTR(r);
+	}
+
+	alloc_chunks(cl, obj, pages_per_chunk);
+	return obj;
+}
+
+void dm_memcache_free(struct dm_memcache_client *cl,
+		       struct dm_memcache_object *obj)
+{
+	free_chunks(cl, obj);
+	mempool_free(obj, cl-&gt;objs_pool);
+}
+
+EXPORT_SYMBOL(dm_memcache_client_create);
+EXPORT_SYMBOL(dm_memcache_client_destroy);
+EXPORT_SYMBOL(dm_memcache_alloc);
+EXPORT_SYMBOL(dm_memcache_free);
+EXPORT_SYMBOL(dm_memcache_grow);
+EXPORT_SYMBOL(dm_memcache_shrink);
+
+MODULE_DESCRIPTION(DM_NAME " dm memory cache");
+MODULE_AUTHOR("Heinz Mauelshagen &lt;mauelshagen@redhat.com&gt;");
+MODULE_LICENSE("GPL");
Index: linux/drivers/md/dm-memcache.h
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ linux/drivers/md/dm-memcache.h	2007-06-06 20:40:08.000000000 +0100
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2006,2007 Red Hat GmbH
+ *
+ * Module Author: Heinz Mauelshagen (Mauelshagen@RedHat.com)
+ *
+ * Allocate/free total_pages to a per client page pool.
+ * Allocate/free memory objects with chunks (1..n) of pages_per_chunk pages
+ * hanging off.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef DM_MEMCACHE_H
+#define DM_MEMCACHE_H
+
+#define	DM_MEMCACHE_H_VERSION	"0.1"
+
+#include "dm.h"
+
+static inline struct page_list *pl_elem(struct page_list *pl, unsigned p)
+{
+	while(pl &amp;&amp; p--)
+		pl = pl-&gt;next;
+
+	return pl;
+}
+
struct dm_memcache_object {
	struct page_list *pl; /* Dynamically allocated array of chunk page lists (client's 'chunks' entries) */
	void *private;	      /* Caller context reference */
};
+
+struct dm_memcache_client;
+
+/*
+ * Create/destroy dm memory cache client resources.
+ */
+struct dm_memcache_client *dm_memcache_client_create(
+	unsigned total_pages, unsigned objects, unsigned chunks);
+void dm_memcache_client_destroy(struct dm_memcache_client *client);
+
/*
 * Grow/shrink a dm memory cache client's resources by
 * chunks * pages_per_chunk pages.
 *
 * (Parameter renamed from the misleading 'pages' to match the
 * definitions: the value is multiplied by the client's chunk count.)
 */
int dm_memcache_grow(struct dm_memcache_client *client,
		     unsigned pages_per_chunk);
int dm_memcache_shrink(struct dm_memcache_client *client,
		       unsigned pages_per_chunk);
+
+/*
+ * Allocate/free a memory object
+ */
+struct dm_memcache_object *
+dm_memcache_alloc(struct dm_memcache_client *client,
+		   unsigned pages_per_chunk);
+void dm_memcache_free(struct dm_memcache_client *client,
+		       struct dm_memcache_object *object);
+
+#endif
</pre></body></html>