[Ocfs2-commits] khackel commits r2658 - branches/ocfs2-1.2/fs/ocfs2/dlm

svn-commits at oss.oracle.com svn-commits at oss.oracle.com
Mon Oct 17 15:48:06 CDT 2005


Author: khackel
Signed-off-by: mfasheh
Date: 2005-10-17 15:48:05 -0500 (Mon, 17 Oct 2005)
New Revision: 2658

Modified:
   branches/ocfs2-1.2/fs/ocfs2/dlm/dlmmaster.c
   branches/ocfs2-1.2/fs/ocfs2/dlm/dlmrecovery.c
Log:
* Merge revisions 2653 through 2657 from trunk
	- 2653: fix deallocation paths in dlm_get_lock_resource
	- 2654: dlm_get_mle was not taking dlm->spinlock
	- 2655: bad dereference of a master list entry
	- 2656: lock resources were stuck in IN_PROGRESS state
	- 2657: recovered lockres would have bad refcount

Signed-off-by: mfasheh



Modified: branches/ocfs2-1.2/fs/ocfs2/dlm/dlmmaster.c
===================================================================
--- branches/ocfs2-1.2/fs/ocfs2/dlm/dlmmaster.c	2005-10-15 01:00:36 UTC (rev 2657)
+++ branches/ocfs2-1.2/fs/ocfs2/dlm/dlmmaster.c	2005-10-17 20:48:05 UTC (rev 2658)
@@ -658,7 +658,8 @@
 					  int flags)
 {
 	struct dlm_lock_resource *tmpres=NULL, *res=NULL;
-	struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
+	struct dlm_master_list_entry *mle = NULL;
+	struct dlm_master_list_entry *alloc_mle = NULL;
 	int blocked = 0;
 	int ret, nodenum;
 	struct dlm_node_iter iter;
@@ -676,26 +677,23 @@
 	if (tmpres) {
 		spin_unlock(&dlm->spinlock);
 		mlog(0, "found in hash!\n");
-		if (mle)
-			kmem_cache_free(dlm_mle_cache, mle);
 		if (res)
 			dlm_lockres_put(res);
-		return tmpres;
+		res = tmpres;
+		goto leave;
 	}
 
 	if (!res) {
 		spin_unlock(&dlm->spinlock);
 		mlog(0, "allocating a new resource\n");
 		/* nothing found and we need to allocate one. */
-		mle = (struct dlm_master_list_entry *)kmem_cache_alloc(dlm_mle_cache,
-								GFP_KERNEL);
-		if (!mle)
-			return NULL;
+		alloc_mle = (struct dlm_master_list_entry *)
+			kmem_cache_alloc(dlm_mle_cache, GFP_KERNEL);
+		if (!alloc_mle)
+			goto leave;
 		res = dlm_new_lockres(dlm, lockid, namelen);
-		if (!res) {
-			kmem_cache_free(dlm_mle_cache, mle);
-			return NULL;
-		}
+		if (!res)
+			goto leave;
 		goto lookup;
 	}
 
@@ -710,8 +708,6 @@
 		spin_unlock(&res->spinlock);
 		spin_unlock(&dlm->spinlock);
 		/* lockres still marked IN_PROGRESS */
-		/* need to free the unused mle */
-		kmem_cache_free(dlm_mle_cache, mle);
 		goto wake_waiters;
 	}
 
@@ -719,12 +715,12 @@
 	spin_lock(&dlm->master_lock);
 
 	/* if we found a block, wait for lock to be mastered by another node */
-	blocked = dlm_find_mle(dlm, &tmpmle, (char *)lockid, namelen);
+	blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
 	if (blocked) {
-		if (tmpmle->type == DLM_MLE_MASTER) {
+		if (mle->type == DLM_MLE_MASTER) {
 			mlog(ML_ERROR, "master entry for nonexistent lock!\n");
 			BUG();
-		} else if (tmpmle->type == DLM_MLE_MIGRATION) {
+		} else if (mle->type == DLM_MLE_MIGRATION) {
 			/* migration is in progress! */
 			/* the good news is that we now know the
 			 * "current" master (mle->master). */
@@ -734,22 +730,22 @@
 
 			/* set the lockres owner and hash it */
 			spin_lock(&res->spinlock);
-			dlm_set_lockres_owner(dlm, res, tmpmle->master);
+			dlm_set_lockres_owner(dlm, res, mle->master);
 			__dlm_insert_lockres(dlm, res);
 			spin_unlock(&res->spinlock);
 			spin_unlock(&dlm->spinlock);
 
 			/* master is known, detach */
-			dlm_mle_detach_hb_events(dlm, tmpmle);
-			dlm_put_mle(tmpmle);
-
-			/* need to free the unused mle */
-			kmem_cache_free(dlm_mle_cache, mle);
+			dlm_mle_detach_hb_events(dlm, mle);
+			dlm_put_mle(mle);
+			mle = NULL;
 			goto wake_waiters;
 		}
-	}
-	if (!blocked) {
+	} else {
 		/* go ahead and try to master lock on this node */
+		mle = alloc_mle;
+		/* make sure this does not get freed below */
+		alloc_mle = NULL;
 		dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
 		list_add(&mle->list, &dlm->master_list);
 	}
@@ -761,15 +757,17 @@
 
 	/* finally add the lockres to its hash bucket */
 	__dlm_insert_lockres(dlm, res);
+	/* get an extra ref on the mle in case this is a BLOCK
+	 * if so, the creator of the BLOCK may try to put the last
+	 * ref at this time in the assert master handler, so we
+	 * need an extra one to keep from a bad ptr deref. */
+	dlm_get_mle(mle);
 	spin_unlock(&dlm->master_lock);
 	spin_unlock(&dlm->spinlock);
 
-	if (blocked) {
-		/* must wait for lock to be mastered elsewhere */
-		kmem_cache_free(dlm_mle_cache, mle);
-		mle = tmpmle;
+	/* must wait for lock to be mastered elsewhere */
+	if (blocked)
 		goto wait;
-	}
 
 redo_request:
 	ret = -EINVAL;
@@ -810,6 +808,8 @@
 	/* master is known, detach if not already detached */
 	dlm_mle_detach_hb_events(dlm, mle);
 	dlm_put_mle(mle);
+	/* put the extra ref */
+	dlm_put_mle(mle);
 
 wake_waiters:
 	spin_lock(&res->spinlock);
@@ -817,6 +817,11 @@
 	spin_unlock(&res->spinlock);
 	wake_up(&res->wq);
 
+leave:
+	/* need to free the unused mle */
+	if (alloc_mle)
+		kmem_cache_free(dlm_mle_cache, alloc_mle);
+
 	return res;
 }
 
@@ -1590,7 +1595,10 @@
 	// mlog(0, "woo!  got an assert_master from node %u!\n",
 	// 	     assert->node_idx);
 	if (mle) {
+		int block;
+		
 		spin_lock(&mle->spinlock);
+		block = !!(mle->type == DLM_MLE_BLOCK);
 		mle->master = assert->node_idx;
 		atomic_set(&mle->woken, 1);
 		wake_up(&mle->wq);
@@ -1610,11 +1618,14 @@
 		/* master is known, detach if not already detached */
 		dlm_mle_detach_hb_events(dlm, mle);
 		dlm_put_mle(mle);
-		/* the assert master message now balances the extra
-		 * ref given by the master request message.
-		 * if this is the last put, it will be removed
-		 * from the list. */
-		dlm_put_mle(mle);
+		
+		if (block) {
+			/* the assert master message now balances the extra
+		 	 * ref given by the master request message.
+		 	 * if this is the last put, it will be removed
+		 	 * from the list. */
+			dlm_put_mle(mle);
+		}
 	}
 
 done:
@@ -1887,10 +1898,14 @@
 
 	/* get an extra reference on the mle.
 	 * otherwise the assert_master from the new
-	 * master will destroy this. */
+	 * master will destroy this.
+	 * also, make sure that all callers of dlm_get_mle
+	 * take both dlm->spinlock and dlm->master_lock */
+	spin_lock(&dlm->spinlock);
 	spin_lock(&dlm->master_lock);
 	dlm_get_mle(mle);
 	spin_unlock(&dlm->master_lock);
+	spin_unlock(&dlm->spinlock);
 
 	/* notify new node and send all lock state */
 	/* call send_one_lockres with migration flag.

Modified: branches/ocfs2-1.2/fs/ocfs2/dlm/dlmrecovery.c
===================================================================
--- branches/ocfs2-1.2/fs/ocfs2/dlm/dlmrecovery.c	2005-10-15 01:00:36 UTC (rev 2657)
+++ branches/ocfs2-1.2/fs/ocfs2/dlm/dlmrecovery.c	2005-10-17 20:48:05 UTC (rev 2658)
@@ -1094,6 +1094,17 @@
 		spin_lock(&dlm->spinlock);
 		__dlm_insert_lockres(dlm, res);
 		spin_unlock(&dlm->spinlock);
+
+		/* now that the new lockres is inserted,
+		 * make it usable by other processes */
+		spin_lock(&res->spinlock);
+		res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
+		spin_unlock(&res->spinlock);
+
+		/* add an extra ref for just-allocated lockres 
+		 * otherwise the lockres will be purged immediately */
+		dlm_lockres_get(res);
+
 	}
 
 	/* at this point we have allocated everything we need,



More information about the Ocfs2-commits mailing list