[Ocfs2-commits] khackel commits r2214 - trunk/fs/ocfs2/dlm

svn-commits at oss.oracle.com
Tue May 3 19:41:41 CDT 2005


Author: khackel
Date: 2005-05-03 19:41:39 -0500 (Tue, 03 May 2005)
New Revision: 2214

Modified:
   trunk/fs/ocfs2/dlm/dlmcommon.h
   trunk/fs/ocfs2/dlm/dlmdomain.c
   trunk/fs/ocfs2/dlm/dlmmaster.c
Log:
* added a slab cache for mles (see the sketch below the log)
* mles are no longer variable size
* fixed a bug in which the mle name array was no longer at the
  end of the structure (it is no longer variable length, so the
  position no longer matters)
* fixed 2 mle memory leaks in dlm_get_lock_resource
* fixed a double lock in dlm_get_lock_resource in the migrating case

Signed-off-by: mfasheh
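
For readers skimming the diff below, here is a minimal sketch of the allocation
lifecycle this commit introduces: a dedicated slab cache is created once at
module init, every former kmalloc() of a (previously variable-size) mle becomes
a fixed-size kmem_cache_alloc(), and every kfree() becomes a kmem_cache_free(),
including the early-exit paths in dlm_get_lock_resource that used to leak the
preallocated mle. The struct mle_stub, alloc_mle and free_mle names are
illustrative stand-ins, not names from the patch; the cache calls themselves
mirror the diff (2.6-era slab API with kmem_cache_t and the ctor/dtor
arguments).

	/* Sketch only: simplified stand-in for dlm_master_list_entry. */
	#include <linux/types.h>
	#include <linux/slab.h>
	#include <linux/errno.h>

	static kmem_cache_t *dlm_mle_cache = NULL;

	struct mle_stub {                 /* fixed size -- no more name[0] */
		u8 name[64];              /* illustrative, not DLM_LOCKID_NAME_MAX */
	};

	int dlm_init_mle_cache(void)      /* called once from dlm_init() */
	{
		dlm_mle_cache = kmem_cache_create("dlm_mle_cache",
						  sizeof(struct mle_stub),
						  0, SLAB_HWCACHE_ALIGN,
						  NULL, NULL);
		if (dlm_mle_cache == NULL)
			return -ENOMEM;
		return 0;
	}

	void dlm_destroy_mle_cache(void)  /* called from dlm_exit() */
	{
		if (dlm_mle_cache)
			kmem_cache_destroy(dlm_mle_cache);
	}

	static struct mle_stub *alloc_mle(void)
	{
		/* replaces kmalloc(sizeof(mle) + namelen, GFP_KERNEL) */
		return kmem_cache_alloc(dlm_mle_cache, GFP_KERNEL);
	}

	static void free_mle(struct mle_stub *mle)
	{
		/* replaces kfree(mle) on every error/early-return path */
		kmem_cache_free(dlm_mle_cache, mle);
	}

Since mles are allocated and freed frequently and are now a constant size, a
per-type slab cache is the natural fit; it also makes the leaked allocations
easier to spot in /proc/slabinfo.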



Modified: trunk/fs/ocfs2/dlm/dlmcommon.h
===================================================================
--- trunk/fs/ocfs2/dlm/dlmcommon.h	2005-05-03 20:24:56 UTC (rev 2213)
+++ trunk/fs/ocfs2/dlm/dlmcommon.h	2005-05-04 00:41:39 UTC (rev 2214)
@@ -295,7 +295,7 @@
 typedef struct _dlm_lock_name
 {
 	u8 len;
-	u8 name[0];   // [DLM_LOCKID_NAME_MAX]
+	u8 name[DLM_LOCKID_NAME_MAX];
 } dlm_lock_name;
 
 /* good god this needs to be trimmed down */
@@ -316,12 +316,12 @@
 	u8 new_master;
 	u8 error;
 	enum dlm_mle_type type;    // BLOCK or MASTER
+	struct hb_callback_func mle_hb_up;
+	struct hb_callback_func mle_hb_down;
 	union {
 		dlm_lock_resource *res;
 		dlm_lock_name name;
 	} u;
-	struct hb_callback_func mle_hb_up;
-	struct hb_callback_func mle_hb_down;
 } dlm_master_list_entry;
 
 typedef struct _dlm_node_iter
@@ -911,6 +911,8 @@
 }
 
 
+int dlm_init_mle_cache(void);
+void dlm_destroy_mle_cache(void);
 void dlm_mle_node_down(dlm_ctxt *dlm, dlm_master_list_entry *mle,
 		       struct nm_node *node, int idx);
 void dlm_mle_node_up(dlm_ctxt *dlm, dlm_master_list_entry *mle,

Modified: trunk/fs/ocfs2/dlm/dlmdomain.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmdomain.c	2005-05-03 20:24:56 UTC (rev 2213)
+++ trunk/fs/ocfs2/dlm/dlmdomain.c	2005-05-04 00:41:39 UTC (rev 2214)
@@ -1338,10 +1338,16 @@
 
 	dlm_print_version();
 
-	status = dlm_register_net_handlers();
+	status = dlm_init_mle_cache();
 	if (status)
 		return -1;
 
+	status = dlm_register_net_handlers();
+	if (status) {
+		dlm_destroy_mle_cache();
+		return -1;
+	}
+
 	dlm_create_dlm_debug_proc_entry();
 
 	return 0;
@@ -1350,6 +1356,7 @@
 static void __exit dlm_exit (void)
 {
 	dlm_unregister_net_handlers();
+	dlm_destroy_mle_cache();
 }				/* dlm_driver_exit */
 
 MODULE_AUTHOR("Oracle");

Modified: trunk/fs/ocfs2/dlm/dlmmaster.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmmaster.c	2005-05-03 20:24:56 UTC (rev 2213)
+++ trunk/fs/ocfs2/dlm/dlmmaster.c	2005-05-04 00:41:39 UTC (rev 2214)
@@ -115,6 +115,9 @@
 #endif
 
 
+kmem_cache_t *dlm_mle_cache = NULL;
+
+
 static void dlm_mle_release(struct kref *kref);
 static void dlm_init_mle(dlm_master_list_entry *mle,
 			enum dlm_mle_type type,
@@ -228,7 +231,6 @@
 	kref_get(&mle->mle_refs);
 }
 
-
 static void dlm_init_mle(dlm_master_list_entry *mle,
 			enum dlm_mle_type type,
 			dlm_ctxt *dlm,
@@ -344,6 +346,23 @@
 }
 
 
+int dlm_init_mle_cache(void)
+{
+	dlm_mle_cache = kmem_cache_create("dlm_mle_cache", 
+					  sizeof(dlm_master_list_entry), 
+					  0, SLAB_HWCACHE_ALIGN, 
+					  NULL, NULL);
+	if (dlm_mle_cache == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+void dlm_destroy_mle_cache(void)
+{
+	if (dlm_mle_cache)
+		kmem_cache_destroy(dlm_mle_cache);
+}
+
 static void dlm_mle_release(struct kref *kref)
 {
 	dlm_master_list_entry *mle;
@@ -378,7 +397,7 @@
 
 	/* NOTE: kfree under spinlock here.
 	 * if this is bad, we can move this to a freelist. */
-	kfree(mle);
+	kmem_cache_free(dlm_mle_cache, mle);
 }
 
 
@@ -562,33 +581,27 @@
 	tmpres = __dlm_lookup_lockres(dlm, lockid, namelen);
 	if (tmpres) {
 		spin_unlock(&dlm->spinlock);
-
 		mlog(0, "found in hash!\n");
-
 		if (mle)
-			kfree(mle);
-
+			kmem_cache_free(dlm_mle_cache, mle);
 		if (res)
 			dlm_lockres_put(res);
-
 		return tmpres;
 	}
 
 	if (!res) {
 		spin_unlock(&dlm->spinlock);
 		mlog(0, "allocating a new resource\n");
-
 		/* nothing found and we need to allocate one. */
-		mle = kmalloc(sizeof(dlm_master_list_entry), GFP_KERNEL);
+		mle = (dlm_master_list_entry *)kmem_cache_alloc(dlm_mle_cache,
+								GFP_KERNEL);
 		if (!mle)
 			return NULL;
-
 		res = dlm_new_lockres(dlm, lockid, namelen);
 		if (!res) {
-			kfree(mle);
+			kmem_cache_free(dlm_mle_cache, mle);
 			return NULL;
 		}
-
 		goto lookup;
 	}
 
@@ -603,6 +616,8 @@
 		spin_unlock(&res->spinlock);
 		spin_unlock(&dlm->spinlock);
 		/* lockres still marked IN_PROGRESS */
+		/* need to free the unused mle */
+		kmem_cache_free(dlm_mle_cache, mle);
 		goto wake_waiters;
 	}
 
@@ -621,9 +636,9 @@
 			 * "current" master (mle->master). */
 		
 			spin_unlock(&dlm->master_lock);
+			assert_spin_locked(&dlm->spinlock);
 
 			/* set the lockres owner and hash it */
-			spin_lock(&dlm->spinlock);
 			spin_lock(&res->spinlock);
 			dlm_set_lockres_owner(dlm, res, tmpmle->master);
 			__dlm_insert_lockres(dlm, res);
@@ -634,6 +649,8 @@
 			dlm_mle_detach_hb_events(dlm, tmpmle);
 			dlm_put_mle(tmpmle);
 
+			/* need to free the unused mle */
+			kmem_cache_free(dlm_mle_cache, mle);
 			goto wake_waiters;
 		}
 	}
@@ -647,12 +664,7 @@
 	/* at this point there is either a DLM_MLE_BLOCK or a
 	 * DLM_MLE_MASTER on the master list, so it's safe to add the
 	 * lockres to the hashtable.  anyone who finds the lock will
-	 * still have to wait on the IN_PROGRESS.  also, any new nodes
-	 * that try to join at this point will have to wait until my
-	 * dlm->master_lock list is empty, so they cannot possibly do
-	 * any master requests yet... TODO ?? should i have a special
-	 * type of mle just for joining nodes ??  ?? could allow them
-	 * to come in and put their mle on the list and sleep ?? */
+	 * still have to wait on the IN_PROGRESS. */
 
 	/* finally add the lockres to its hash bucket */
 	__dlm_insert_lockres(dlm, res);
@@ -660,7 +672,7 @@
 
 	if (blocked) {
 		/* must wait for lock to be mastered elsewhere */
-		kfree(mle);
+		kmem_cache_free(dlm_mle_cache, mle);
 		mle = tmpmle;
 		goto wait;
 	}
@@ -941,7 +953,7 @@
 			     "being recovered\n");
 			response = DLM_MASTER_RESP_ERROR;
 			if (mle)
-				kfree(mle);
+				kmem_cache_free(dlm_mle_cache, mle);
 			goto send_response;
 		}
 
@@ -951,7 +963,7 @@
 			// mlog(0, "this node is the master\n");
 			response = DLM_MASTER_RESP_YES;
 			if (mle)
-				kfree(mle);
+				kmem_cache_free(dlm_mle_cache, mle);
 
 			/* this node is the owner.
 			 * there is some extra work that needs to 
@@ -973,7 +985,7 @@
 			// mlog(0, "node %u is the master\n", res->owner);
 			response = DLM_MASTER_RESP_NO;
 			if (mle)
-				kfree(mle);
+				kmem_cache_free(dlm_mle_cache, mle);
 			goto send_response;
 		}
 
@@ -1025,7 +1037,7 @@
 		/* keep the mle attached to heartbeat events */
 		dlm_put_mle(tmpmle);
 		if (mle)
-			kfree(mle);
+			kmem_cache_free(dlm_mle_cache, mle);
 		goto send_response;
 	}
 
@@ -1044,8 +1056,8 @@
 			spin_unlock(&dlm->master_lock);
 			spin_unlock(&dlm->spinlock);
 
-			mle = kmalloc(sizeof(dlm_master_list_entry) + 
-				      namelen, GFP_KERNEL);
+			mle = (dlm_master_list_entry *)
+				kmem_cache_alloc(dlm_mle_cache, GFP_KERNEL);
 			if (!mle) {
 				// bad bad bad... this sucks.
 				response = DLM_MASTER_RESP_ERROR;
@@ -1462,7 +1474,8 @@
 		goto leave;
 	}
 
-	mle = kmalloc(sizeof(dlm_master_list_entry) + namelen, GFP_KERNEL);
+	mle = (dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
+							 GFP_KERNEL);
 	if (!mle) {
 		ret = -ENOMEM;
 		goto leave;
@@ -1528,7 +1541,7 @@
 			dlm_mle_detach_hb_events(dlm, mle);
 			dlm_put_mle(mle);
 		} else if (mle) {
-			kfree(mle);
+			kmem_cache_free(dlm_mle_cache, mle);
 		}
 		goto leave;
 	}
@@ -1808,7 +1821,9 @@
 	namelen = migrate->namelen;
 
 	/* preallocate.. if this fails, abort */
-	mle = kmalloc(sizeof(dlm_master_list_entry) + namelen, GFP_KERNEL);
+	mle = (dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
+							 GFP_KERNEL);
+
 	if (!mle) {
 		ret = -ENOMEM;
 		goto leave;
@@ -1828,7 +1843,7 @@
 			spin_unlock(&res->spinlock);
 			mlog(ML_ERROR, "Got a migrate request, but the "
 			     "lockres is marked as recovering!");
-			kfree(mle);
+			kmem_cache_free(dlm_mle_cache, mle);
 			ret = -EINVAL; /* need a better solution */
 			goto unlock;
 		}


