[Ocfs2-commits] zab commits r2625 - branches/locking-changes/fs/ocfs2

svn-commits at oss.oracle.com
Tue Sep 27 20:31:04 CDT 2005


Author: zab
Date: 2005-09-27 20:31:02 -0500 (Tue, 27 Sep 2005)
New Revision: 2625

Modified:
   branches/locking-changes/fs/ocfs2/dlmglue.c
   branches/locking-changes/fs/ocfs2/dlmglue.h
Log:
dlmglue locking changes:

* Take the lockres spinlock with spin_lock_irqsave()/spin_unlock_irqrestore()
  throughout dlmglue.c instead of plain spin_lock()/spin_unlock().

* Rename the shadowed 'flags' arguments to dlm_flags / arg_flags so a local
  'unsigned long flags' can carry the saved irq state.

* Replace __ocfs2_downconvert_lock() and __ocfs2_cancel_convert(), which were
  called with the spinlock held and dropped it themselves, with
  ocfs2_prepare_downconvert()/ocfs2_downconvert_lock() and
  ocfs2_prepare_cancel_convert()/ocfs2_cancel_convert(); callers now drop the
  spinlock themselves before calling into the dlm.

* Fold __ocfs2_drop_lock() into its only caller.
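The resulting caller pattern for the downconvert path looks roughly like this
(a minimal sketch pieced together from the unblock hunks below; new_level and
set_lvb stand in for whatever the caller computed):

	unsigned long flags;

	spin_lock_irqsave(&lockres->l_lock, flags);
	/* inspect lockres state and pick new_level while holding the spinlock */
	ocfs2_prepare_downconvert(lockres, new_level);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	/* the dlmlock() call is made outside the spinlock */
	ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb);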


Modified: branches/locking-changes/fs/ocfs2/dlmglue.c
===================================================================
--- branches/locking-changes/fs/ocfs2/dlmglue.c	2005-09-28 01:02:10 UTC (rev 2624)
+++ branches/locking-changes/fs/ocfs2/dlmglue.c	2005-09-28 01:31:02 UTC (rev 2625)
@@ -172,7 +172,7 @@
 static int ocfs2_lock_create(ocfs2_super *osb,
 			     struct ocfs2_lock_res *lockres,
 			     int level,
-			     int flags);
+			     int dlm_flags);
 static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
 						     int wanted);
 static int ocfs2_cluster_lock(ocfs2_super *osb,
@@ -209,12 +209,6 @@
 				  struct buffer_head **bh);
 static void ocfs2_drop_osb_locks(ocfs2_super *osb);
 static inline int ocfs2_highest_compat_lock_level(int level);
-static int __ocfs2_downconvert_lock(ocfs2_super *osb,
-				    struct ocfs2_lock_res *lockres,
-				    int new_level,
-				    int lvb);
-static int __ocfs2_cancel_convert(ocfs2_super *osb,
-				  struct ocfs2_lock_res *lockres);
 static inline int ocfs2_can_downconvert_meta_lock(struct inode *inode,
 						  struct ocfs2_lock_res *lockres,
 						  int new_level);
@@ -239,18 +233,19 @@
 {
 	struct ocfs2_lvb *lvb = (struct ocfs2_lvb *) lockres->l_lksb.lvb;
 	unsigned int lvb_seq;
+	unsigned long flags;
 	int ret = 0;
 
 	mlog_entry_void();
 
-	spin_lock(&lockres->l_lock);
+	spin_lock_irqsave(&lockres->l_lock, flags);
 
 	lvb_seq = be32_to_cpu(lvb->lvb_seq);
 	if (lvb_seq &&
 	    lockres->l_local_seq == lvb_seq)
 		ret = 1;
 
-	spin_unlock(&lockres->l_lock);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
 
 	mlog_exit(ret);
 	return ret;
@@ -260,16 +255,17 @@
 {
 	struct ocfs2_lvb *lvb = (struct ocfs2_lvb *) lockres->l_lksb.lvb;
 	unsigned int lvb_seq;
+	unsigned long flags;
 
 	mlog_entry_void();
 
-	spin_lock(&lockres->l_lock);
+	spin_lock_irqsave(&lockres->l_lock, flags);
 
 	lvb_seq = be32_to_cpu(lvb->lvb_seq);
 	if (lvb_seq)
 		lockres->l_local_seq = lvb_seq;
 
-	spin_unlock(&lockres->l_lock);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
 
 	mlog_exit_void();
 }
@@ -279,19 +275,20 @@
 						     unsigned int *trunc_clusters)
 {
 	struct ocfs2_meta_lvb *lvb;
+	unsigned long flags;
 
 	mlog_entry_void();
 
 	BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_META);
 
-	spin_lock(&lockres->l_lock);
+	spin_lock_irqsave(&lockres->l_lock, flags);
 	BUG_ON(lockres->l_level <= LKM_NLMODE);
 
 	lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
 	if (trunc_clusters)
 		*trunc_clusters = be32_to_cpu(lvb->lvb_trunc_clusters);
 
-	spin_unlock(&lockres->l_lock);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
 
 	mlog_exit_void();
 }
@@ -578,6 +575,7 @@
 	struct ocfs2_lock_res *lockres = opaque;
 	struct inode *inode;
 	struct dlm_lockstatus *lksb;
+	unsigned long flags;
 
 	mlog_entry_void();
 
@@ -589,14 +587,14 @@
 
 	BUG_ON(!ocfs2_is_inode_lock(lockres));
 
-	spin_lock(&lockres->l_lock);
+	spin_lock_irqsave(&lockres->l_lock, flags);
 
 	lksb = &(lockres->l_lksb);
 	if (lksb->status != DLM_NORMAL) {
 		mlog(ML_ERROR, "ocfs2_inode_ast_func: lksb status value of %u "
 		     "on inode %"MLFu64"\n", lksb->status,
 		     OCFS2_I(inode)->ip_blkno);
-		spin_unlock(&lockres->l_lock);
+		spin_unlock_irqrestore(&lockres->l_lock, flags);
 		mlog_exit_void();
 		return;
 	}
@@ -628,7 +626,7 @@
 	/* set it to something invalid so if we get called again we
 	 * can catch it. */
 	lockres->l_action = OCFS2_AST_INVALID;
-	spin_unlock(&lockres->l_lock);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
 	wake_up(&lockres->l_event);
 
 	mlog_exit_void();
@@ -665,16 +663,17 @@
 				    int level)
 {
 	int needs_downconvert;
+	unsigned long flags;
 
 	mlog_entry_void();
 
 	BUG_ON(level <= LKM_NLMODE);
 
-	spin_lock(&lockres->l_lock);
+	spin_lock_irqsave(&lockres->l_lock, flags);
 	needs_downconvert = ocfs2_generic_handle_bast(lockres, level);
 	if (needs_downconvert)
 		ocfs2_schedule_blocked_lock(osb, lockres);
-	spin_unlock(&lockres->l_lock);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
 
 	ocfs2_kick_vote_thread(osb);
 
@@ -709,13 +708,14 @@
 				   int ignore_refresh)
 {
 	struct dlm_lockstatus *lksb = &lockres->l_lksb;
+	unsigned long flags;
 
-	spin_lock(&lockres->l_lock);
+	spin_lock_irqsave(&lockres->l_lock, flags);
 
 	if (lksb->status != DLM_NORMAL) {
 		mlog(ML_ERROR, "lockres %s: lksb status value of %u!\n",
 		     lockres->l_name, lksb->status);
-		spin_unlock(&lockres->l_lock);
+		spin_unlock_irqrestore(&lockres->l_lock, flags);
 		return;
 	}
 
@@ -739,7 +739,7 @@
 	/* set it to something invalid so if we get called again we
 	 * can catch it. */
 	lockres->l_action = OCFS2_AST_INVALID;
-	spin_unlock(&lockres->l_lock);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
 
 	wake_up(&lockres->l_event);
 }
@@ -809,14 +809,16 @@
 static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
 						int convert)
 {
+	unsigned long flags;
+
 	mlog_entry_void();
-	spin_lock(&lockres->l_lock);
+	spin_lock_irqsave(&lockres->l_lock, flags);
 	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
 	if (convert)
 		lockres->l_action = OCFS2_AST_INVALID;
 	else
 		lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
-	spin_unlock(&lockres->l_lock);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
 
 	wake_up(&lockres->l_event);
 	mlog_exit_void();
@@ -829,32 +831,33 @@
 static int ocfs2_lock_create(ocfs2_super *osb,
 			     struct ocfs2_lock_res *lockres,
 			     int level,
-			     int flags)
+			     int dlm_flags)
 {
 	int ret = 0;
 	enum dlm_status status;
+	unsigned long flags;
 
 	mlog_entry_void();
 
 	mlog(0, "lock %s, level = %d, flags = %d\n", lockres->l_name, level,
-	     flags);
+	     dlm_flags);
 
-	spin_lock(&lockres->l_lock);
+	spin_lock_irqsave(&lockres->l_lock, flags);
 	if ((lockres->l_flags & OCFS2_LOCK_ATTACHED) ||
 	    (lockres->l_flags & OCFS2_LOCK_BUSY)) {
-		spin_unlock(&lockres->l_lock);
+		spin_unlock_irqrestore(&lockres->l_lock, flags);
 		goto bail;
 	}
 
 	lockres->l_action = OCFS2_AST_ATTACH;
 	lockres->l_requested = level;
 	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
-	spin_unlock(&lockres->l_lock);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
 
 	status = dlmlock(osb->dlm,
 			 level,
 			 &lockres->l_lksb,
-			 flags,
+			 dlm_flags,
 			 lockres->l_name,
 			 lockres->l_ops->ast,
 			 lockres,
@@ -875,11 +878,12 @@
 static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres,
 					int flag)
 {
+	unsigned long flags;
 	int ret;
 
-	spin_lock(&lockres->l_lock);
+	spin_lock_irqsave(&lockres->l_lock, flags);
 	ret = lockres->l_flags & flag;
-	spin_unlock(&lockres->l_lock);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
 
 	return ret;
 }
@@ -946,6 +950,7 @@
 	enum dlm_status status;
 	int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
 	int ret = 0; /* gcc doesn't realize wait = 1 guarantees ret is set */
+	unsigned long flags;
 
 	mlog_entry_void();
 
@@ -959,7 +964,7 @@
 		goto out;
 	}
 
-	spin_lock(&lockres->l_lock);
+	spin_lock_irqsave(&lockres->l_lock, flags);
 
 	mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
 			"Cluster lock called on freeing lockres %s! flags "
@@ -979,7 +984,7 @@
 
 	if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
 		/* lock has not been created yet. */
-		spin_unlock(&lockres->l_lock);
+		spin_unlock_irqrestore(&lockres->l_lock, flags);
 
 		ret = ocfs2_lock_create(osb, lockres, LKM_NLMODE, 0);
 		if (ret < 0) {
@@ -1006,7 +1011,7 @@
 		lockres->l_action = OCFS2_AST_CONVERT;
 		lockres->l_requested = level;
 		lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
-		spin_unlock(&lockres->l_lock);
+		spin_unlock_irqrestore(&lockres->l_lock, flags);
 
 		BUG_ON(level == LKM_IVMODE);
 		BUG_ON(level == LKM_NLMODE);
@@ -1052,7 +1057,7 @@
 
 	ret = 0;
 unlock:
-	spin_unlock(&lockres->l_lock);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
 out:
 	if (wait) {
 		ret = ocfs2_wait_for_mask(&mw);
@@ -1069,11 +1074,13 @@
 				 struct ocfs2_lock_res *lockres,
 				 int level)
 {
+	unsigned long flags;
+
 	mlog_entry_void();
-	spin_lock(&lockres->l_lock);
+	spin_lock_irqsave(&lockres->l_lock, flags);
 	ocfs2_dec_holders(lockres, level);
 	ocfs2_vote_on_unlock(osb, lockres);
-	spin_unlock(&lockres->l_lock);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
 	mlog_exit_void();
 }
 
@@ -1081,11 +1088,12 @@
 				       struct ocfs2_lock_res *lockres)
 {
 	ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+	unsigned long flags;
 
-	spin_lock(&lockres->l_lock);
+	spin_lock_irqsave(&lockres->l_lock, flags);
 	BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
 	lockres_or_flags(lockres, OCFS2_LOCK_LOCAL);
-	spin_unlock(&lockres->l_lock);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
 
 	return ocfs2_lock_create(osb, lockres, LKM_EXMODE, LKM_LOCAL);
 }
@@ -1378,6 +1386,7 @@
 {
 	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_meta_lockres;
 	struct ocfs2_meta_lvb *lvb;
+	unsigned long flags;
 	u32 i_clusters;
 
 	mlog_entry_void();
@@ -1388,10 +1397,10 @@
 	i_clusters = OCFS2_I(inode)->ip_clusters;
 	spin_unlock(&OCFS2_I(inode)->ip_lock);
 
-	spin_lock(&lockres->l_lock);
+	spin_lock_irqsave(&lockres->l_lock, flags);
 	if (lockres->l_level == LKM_EXMODE)
 		lvb->lvb_trunc_clusters = cpu_to_be32(i_clusters);
-	spin_unlock(&lockres->l_lock);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
 
 	mlog_exit_void();
 }
@@ -1430,19 +1439,20 @@
  *   ocfs2_complete_lock_res_refresh afterwards. */
 static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
 {
+	unsigned long flags;
+	int status = 0;
 
-	int status = 0;
 	mlog_entry_void();
 
 refresh_check:
-	spin_lock(&lockres->l_lock);
+	spin_lock_irqsave(&lockres->l_lock, flags);
 	if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
-		spin_unlock(&lockres->l_lock);
+		spin_unlock_irqrestore(&lockres->l_lock, flags);
 		goto bail;
 	}
 
 	if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
-		spin_unlock(&lockres->l_lock);
+		spin_unlock_irqrestore(&lockres->l_lock, flags);
 
 		ocfs2_wait_on_refreshing_lock(lockres);
 		goto refresh_check;
@@ -1450,7 +1460,7 @@
 
 	/* Ok, I'll be the one to refresh this lock. */
 	lockres_or_flags(lockres, OCFS2_LOCK_REFRESHING);
-	spin_unlock(&lockres->l_lock);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
 
 	status = 1;
 bail:
@@ -1463,13 +1473,14 @@
 static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres,
 						   int status)
 {
+	unsigned long flags;
 	mlog_entry_void();
 
-	spin_lock(&lockres->l_lock);
+	spin_lock_irqsave(&lockres->l_lock, flags);
 	lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
 	if (!status)
 		lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
-	spin_unlock(&lockres->l_lock);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
 
 	wake_up(&lockres->l_event);
 
@@ -1620,7 +1631,7 @@
 			 ocfs2_journal_handle *handle,
 			 struct buffer_head **ret_bh,
 			 int ex,
-			 int flags)
+			 int arg_flags)
 {
 	int status, level, dlm_flags, acquired;
 	struct ocfs2_lock_res *lockres;
@@ -1645,7 +1656,7 @@
 		goto bail;
 	}
 
-	if (!(flags & OCFS2_META_LOCK_RECOVERY))
+	if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
 		wait_event(osb->recovery_event,
 			   ocfs2_node_map_is_empty(osb, &osb->recovery_map));
 
@@ -1653,7 +1664,7 @@
 	lockres = &OCFS2_I(inode)->ip_meta_lockres;
 	level = ex ? LKM_EXMODE : LKM_PRMODE;
 	dlm_flags = 0;
-	if (flags & OCFS2_META_LOCK_NOQUEUE)
+	if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
 		dlm_flags |= LKM_NOQUEUE;
 
 	status = ocfs2_cluster_lock(osb, lockres, level, dlm_flags);
@@ -1670,7 +1681,7 @@
 	 * the lower dlm layers. The second time though, we've
 	 * committed to owning this lock so we don't allow signals to
 	 * abort the operation. */
-	if (!(flags & OCFS2_META_LOCK_RECOVERY))
+	if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
 		wait_event(osb->recovery_event,
 			   ocfs2_node_map_is_empty(osb, &osb->recovery_map));
 
@@ -1881,17 +1892,18 @@
 static void ocfs2_unlock_ast_func(void *opaque, enum dlm_status status)
 {
 	struct ocfs2_lock_res *lockres = opaque;
+	unsigned long flags;
 
 	mlog_entry_void();
 
 	mlog(0, "UNLOCK AST called on lock %s, action = %d\n", lockres->l_name,
 	     lockres->l_unlock_action);
 
-	spin_lock(&lockres->l_lock);
+	spin_lock_irqsave(&lockres->l_lock, flags);
 	/* We tried to cancel a convert request, but it was already
 	 * granted. All we want to do here is clear our unlock
 	 * state. The wake_up call done at the bottom is redundant
-	 * (__ocfs2_cancel_convert doesn't sleep on this) but doesn't
+	 * (ocfs2_prepare_cancel_convert doesn't sleep on this) but doesn't
 	 * hurt anything anyway */
 	if (status == DLM_CANCELGRANT &&
 	    lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
@@ -1907,7 +1919,7 @@
 		mlog(ML_ERROR, "Dlm passes status %d for lock %s, "
 		     "unlock_action %d\n", status, lockres->l_name,
 		     lockres->l_unlock_action);
-		spin_unlock(&lockres->l_lock);
+		spin_unlock_irqrestore(&lockres->l_lock, flags);
 		return;
 	}
 
@@ -1926,65 +1938,13 @@
 	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
 complete_unlock:
 	lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
-	spin_unlock(&lockres->l_lock);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
 
 	wake_up(&lockres->l_event);
 
 	mlog_exit_void();
 }
 
-/* BEWARE: called with lockres lock, and always drops it. Caller
- * should not be calling us with a busy lock... */
-static int __ocfs2_drop_lock(ocfs2_super *osb,
-			     struct ocfs2_lock_res *lockres)
-{
-	int ret = 0;
-	enum dlm_status status;
-
-	if (lockres->l_flags & OCFS2_LOCK_BUSY)
-		mlog(ML_ERROR, "destroying busy lock: \"%s\"\n",
-		     lockres->l_name);
-	if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
-		mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name);
-
-	if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
-		spin_unlock(&lockres->l_lock);
-		goto bail;
-	}
-
-	lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED);
-
-	/* make sure we never get here while waiting for an ast to
-	 * fire. */
-	BUG_ON(lockres->l_action != OCFS2_AST_INVALID);
-
-	/* is this necessary? */
-	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
-	lockres->l_unlock_action = OCFS2_UNLOCK_DROP_LOCK;
-	spin_unlock(&lockres->l_lock);
-
-	mlog(0, "lock %s\n", lockres->l_name);
-
-	status = dlmunlock(osb->dlm,
-			   &lockres->l_lksb,
-			   LKM_VALBLK,
-			   lockres->l_ops->unlock_ast,
-			   lockres);
-	if (status != DLM_NORMAL) {
-		ocfs2_log_dlm_error("dlmunlock", status, lockres);
-		mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
-		dlm_print_one_lock(lockres->l_lksb.lockid);
-		BUG();
-	}
-	mlog(0, "lock %s, successfull return from dlmunlock\n",
-	     lockres->l_name);
-
-	ocfs2_wait_on_busy_lock(lockres);
-bail:
-	mlog_exit(ret);
-	return ret;
-}
-
 typedef void (ocfs2_pre_drop_cb_t)(struct ocfs2_lock_res *, void *);
 
 struct drop_lock_cb {
@@ -1996,11 +1956,14 @@
 			   struct ocfs2_lock_res *lockres,
 			   struct drop_lock_cb *dcb)
 {
+	enum dlm_status status;
+	unsigned long flags;
+
 	/* We didn't get anywhere near actually using this lockres. */
 	if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED))
-		return 0;
+		goto out;
 
-	spin_lock(&lockres->l_lock);
+	spin_lock_irqsave(&lockres->l_lock, flags);
 
 	mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING),
 			"lockres %s, flags 0x%lx\n",
@@ -2012,22 +1975,58 @@
 		     lockres->l_name, lockres->l_flags, lockres->l_action,
 		     lockres->l_unlock_action);
 
-		spin_unlock(&lockres->l_lock);
+		spin_unlock_irqrestore(&lockres->l_lock, flags);
 
 		/* XXX: Today we just wait on any busy
 		 * locks... Perhaps we need to cancel converts in the
 		 * future? */
 		ocfs2_wait_on_busy_lock(lockres);
 
-		spin_lock(&lockres->l_lock);
+		spin_lock_irqsave(&lockres->l_lock, flags);
 	}
 
 	if (dcb)
 		dcb->drop_func(lockres, dcb->drop_data);
 
-	/* This will drop the spinlock for us. Dur de dur, at least we
-	 * keep the ugliness in one place :) */
-	return  __ocfs2_drop_lock(osb, lockres);
+	if (lockres->l_flags & OCFS2_LOCK_BUSY)
+		mlog(ML_ERROR, "destroying busy lock: \"%s\"\n",
+		     lockres->l_name);
+	if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
+		mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name);
+
+	if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
+		spin_unlock_irqrestore(&lockres->l_lock, flags);
+		goto out;
+	}
+
+	lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED);
+
+	/* make sure we never get here while waiting for an ast to
+	 * fire. */
+	BUG_ON(lockres->l_action != OCFS2_AST_INVALID);
+
+	/* is this necessary? */
+	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
+	lockres->l_unlock_action = OCFS2_UNLOCK_DROP_LOCK;
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+	mlog(0, "lock %s\n", lockres->l_name);
+
+	status = dlmunlock(osb->dlm, &lockres->l_lksb, LKM_VALBLK,
+			   lockres->l_ops->unlock_ast, lockres);
+	if (status != DLM_NORMAL) {
+		ocfs2_log_dlm_error("dlmunlock", status, lockres);
+		mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
+		dlm_print_one_lock(lockres->l_lksb.lockid);
+		BUG();
+	}
+	mlog(0, "lock %s, successful return from dlmunlock\n",
+	     lockres->l_name);
+
+	ocfs2_wait_on_busy_lock(lockres);
+out:
+	mlog_exit(0);
+	return 0;
 }
 
 /* Mark the lockres as being dropped. It will no longer be
@@ -2040,14 +2039,15 @@
 {
 	int status;
 	struct ocfs2_mask_waiter mw;
+	unsigned long flags;
 
 	ocfs2_init_mask_waiter(&mw);
 
-	spin_lock(&lockres->l_lock);
+	spin_lock_irqsave(&lockres->l_lock, flags);
 	lockres->l_flags |= OCFS2_LOCK_FREEING;
 	while (lockres->l_flags & OCFS2_LOCK_QUEUED) {
 		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0);
-		spin_unlock(&lockres->l_lock);
+		spin_unlock_irqrestore(&lockres->l_lock, flags);
 
 		mlog(0, "Waiting on lockres %s\n", lockres->l_name);
 
@@ -2055,9 +2055,9 @@
 		if (status)
 			mlog_errno(status);
 
-		spin_lock(&lockres->l_lock);
+		spin_lock_irqsave(&lockres->l_lock, flags);
 	}
-	spin_unlock(&lockres->l_lock);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
 }
 
 static void ocfs2_drop_osb_locks(ocfs2_super *osb)
@@ -2133,17 +2133,11 @@
 	return status;
 }
 
-/* called with the spinlock held, and WILL drop it. */
-static int __ocfs2_downconvert_lock(ocfs2_super *osb,
-				    struct ocfs2_lock_res *lockres,
-				    int new_level,
-				    int lvb)
+static void ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
+				      int new_level)
 {
-	int ret, flags = LKM_CONVERT;
-	enum dlm_status status;
+	assert_spin_locked(&lockres->l_lock);
 
-	mlog_entry_void();
-
 	BUG_ON(lockres->l_blocking <= LKM_NLMODE);
 
 	if (lockres->l_level <= new_level) {
@@ -2152,21 +2146,31 @@
 		BUG();
 	}
 
-	mlog(0, "lock %s, new_level = %d, l_blocking = %d, lvb = %d\n",
-	     lockres->l_name, new_level, lockres->l_blocking, lvb);
+	mlog(0, "lock %s, new_level = %d, l_blocking = %d\n",
+	     lockres->l_name, new_level, lockres->l_blocking);
 
 	lockres->l_action = OCFS2_AST_DOWNCONVERT;
 	lockres->l_requested = new_level;
 	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
-	spin_unlock(&lockres->l_lock);
+}
 
+static int ocfs2_downconvert_lock(ocfs2_super *osb,
+				  struct ocfs2_lock_res *lockres,
+				  int new_level,
+				  int lvb)
+{
+	int ret, dlm_flags = LKM_CONVERT;
+	enum dlm_status status;
+
+	mlog_entry_void();
+
 	if (lvb)
-		flags |= LKM_VALBLK;
+		dlm_flags |= LKM_VALBLK;
 
 	status = dlmlock(osb->dlm,
 			 new_level,
 			 &lockres->l_lksb,
-			 flags,
+			 dlm_flags,
 			 lockres->l_name,
 			 lockres->l_ops->ast,
 			 lockres,
@@ -2184,17 +2188,24 @@
 	return ret;
 }
 
-/* called with the spinlock held, and WILL drop it. */
-static int __ocfs2_cancel_convert(ocfs2_super *osb,
-				  struct ocfs2_lock_res *lockres)
+/* returns 1 when the caller should unlock and call dlmunlock */
+static int ocfs2_prepare_cancel_convert(ocfs2_super *osb,
+				        struct ocfs2_lock_res *lockres)
 {
-	int ret;
-	enum dlm_status status;
+	assert_spin_locked(&lockres->l_lock);
 
 	mlog_entry_void();
-
 	mlog(0, "lock %s\n", lockres->l_name);
 
+	if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
+		/* If we're already trying to cancel a lock conversion
+		 * then just drop the spinlock and allow the caller to
+		 * requeue this lock. */
+
+		mlog(0, "Lockres %s, skip convert\n", lockres->l_name);
+		return 0;
+	}
+
 	/* were we in a convert when we got the bast fire? */
 	BUG_ON(lockres->l_action != OCFS2_AST_CONVERT &&
 	       lockres->l_action != OCFS2_AST_DOWNCONVERT);
@@ -2205,8 +2216,19 @@
 	mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_BUSY),
 			"lock %s, invalid flags: 0x%lx\n",
 			lockres->l_name, lockres->l_flags);
-	spin_unlock(&lockres->l_lock);
 
+	return 1;
+}
+
+static int ocfs2_cancel_convert(ocfs2_super *osb,
+				struct ocfs2_lock_res *lockres)
+{
+	int ret;
+	enum dlm_status status;
+
+	mlog_entry_void();
+	mlog(0, "lock %s\n", lockres->l_name);
+
 	ret = 0;
 	status = dlmunlock(osb->dlm,
 			   &lockres->l_lksb,
@@ -2225,25 +2247,6 @@
 	return ret;
 }
 
-static int ocfs2_cancel_convert(ocfs2_super *osb,
-				struct ocfs2_lock_res *lockres)
-{
-	assert_spin_locked(&lockres->l_lock);
-
-	if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
-		/* If we're already trying to cancel a lock conversion
-		 * then just drop the spinlock and allow the caller to
-		 * requeue this lock. */
-		spin_unlock(&lockres->l_lock);
-
-		mlog(0, "Lockres %s, skip convert\n", lockres->l_name);
-		return 0;
-	}
-
-	/* this will drop the spinlock for us. */
-	return __ocfs2_cancel_convert(osb, lockres);
-}
-
 static inline int ocfs2_can_downconvert_meta_lock(struct inode *inode,
 						  struct ocfs2_lock_res *lockres,
 						  int new_level)
@@ -2276,11 +2279,13 @@
 	int set_lvb = 0;
 	int ret = 0;
 	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_meta_lockres;
+	unsigned long flags;
+
 	ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 
 	mlog_entry_void();
 
-	spin_lock(&lockres->l_lock);
+	spin_lock_irqsave(&lockres->l_lock, flags);
 
 	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
 
@@ -2292,9 +2297,13 @@
 
 	if (lockres->l_flags & OCFS2_LOCK_BUSY) {
 		*requeue = 1;
-		ret = ocfs2_cancel_convert(osb, lockres);
-		if (ret < 0)
-			mlog_errno(ret);
+		ret = ocfs2_prepare_cancel_convert(osb, lockres);
+		spin_unlock_irqrestore(&lockres->l_lock, flags);
+		if (ret) {
+			ret = ocfs2_cancel_convert(osb, lockres);
+			if (ret < 0)
+				mlog_errno(ret);
+		}
 		goto leave;
 	}
 
@@ -2319,19 +2328,20 @@
 			mlog(0, "lockres %s: downconverting stale lock!\n",
 			     lockres->l_name);
 
-		mlog(0, "calling __ocfs2_downconvert_lock with "
-		     "l_level=%d, l_blocking=%d, new_level=%d\n",
-		     lockres->l_level, lockres->l_blocking,
-		     new_level);
-		ret = __ocfs2_downconvert_lock(osb, lockres, new_level,
-					       set_lvb);
+		mlog(0, "calling ocfs2_downconvert_lock with l_level=%d, "
+		     "l_blocking=%d, new_level=%d\n",
+		     lockres->l_level, lockres->l_blocking, new_level);
+
+		ocfs2_prepare_downconvert(lockres, new_level);
+		spin_unlock_irqrestore(&lockres->l_lock, flags);
+		ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb);
 		goto leave;
 	}
 	if (!ocfs2_inode_fully_checkpointed(inode))
 		ocfs2_start_checkpoint(osb);
 
 	*requeue = 1;
-	spin_unlock(&lockres->l_lock);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
 	ret = 0;
 leave:
 	mlog_exit(ret);
@@ -2343,22 +2353,27 @@
 				      int *requeue,
 				      ocfs2_convert_worker_t *worker)
 {
+	unsigned long flags;
 	int blocking;
 	int new_level;
 	int ret = 0;
 
 	mlog_entry_void();
 
-	spin_lock(&lockres->l_lock);
+	spin_lock_irqsave(&lockres->l_lock, flags);
 
 	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
 
 recheck:
 	if (lockres->l_flags & OCFS2_LOCK_BUSY) {
 		*requeue = 1;
-		ret = ocfs2_cancel_convert(osb, lockres);
-		if (ret < 0)
-			mlog_errno(ret);
+		ret = ocfs2_prepare_cancel_convert(osb, lockres);
+		spin_unlock_irqrestore(&lockres->l_lock, flags);
+		if (ret) {
+			ret = ocfs2_cancel_convert(osb, lockres);
+			if (ret < 0)
+				mlog_errno(ret);
+		}
 		goto leave;
 	}
 
@@ -2366,7 +2381,7 @@
 	 * then requeue. */
 	if ((lockres->l_blocking == LKM_EXMODE)
 	    && (lockres->l_ex_holders || lockres->l_ro_holders)) {
-		spin_unlock(&lockres->l_lock);
+		spin_unlock_irqrestore(&lockres->l_lock, flags);
 		*requeue = 1;
 		ret = 0;
 		goto leave;
@@ -2376,7 +2391,7 @@
 	 * requeue if we've got any EX holders */
 	if (lockres->l_blocking == LKM_PRMODE &&
 	    lockres->l_ex_holders) {
-		spin_unlock(&lockres->l_lock);
+		spin_unlock_irqrestore(&lockres->l_lock, flags);
 		*requeue = 1;
 		ret = 0;
 		goto leave;
@@ -2393,11 +2408,11 @@
 	 * may sleep, so we save off a copy of what we're blocking as
 	 * it may change while we're not holding the spin lock. */
 	blocking = lockres->l_blocking;
-	spin_unlock(&lockres->l_lock);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
 
 	worker(lockres, blocking);
 
-	spin_lock(&lockres->l_lock);
+	spin_lock_irqsave(&lockres->l_lock, flags);
 	if (blocking != lockres->l_blocking) {
 		/* If this changed underneath us, then we can't drop
 		 * it just yet. */
@@ -2408,7 +2423,9 @@
 	*requeue = 0;
 	new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
 
-	ret = __ocfs2_downconvert_lock(osb, lockres, new_level, 0);
+	ocfs2_prepare_downconvert(lockres, new_level);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
+	ret = ocfs2_downconvert_lock(osb, lockres, new_level, 0);
 leave:
 	mlog_exit(ret);
 	return ret;
@@ -2550,6 +2567,7 @@
 {
 	int status;
 	int requeue = 0;
+	unsigned long flags;
 
 	/* Our reference to the lockres in this function can be
 	 * considered valid until we remove the OCFS2_LOCK_QUEUED
@@ -2568,16 +2586,16 @@
 	 * still be marked with OCFS2_LOCK_FREEING after this check,
 	 * but short circuiting here will still save us some
 	 * performance. */
-	spin_lock(&lockres->l_lock);
+	spin_lock_irqsave(&lockres->l_lock, flags);
 	if (lockres->l_flags & OCFS2_LOCK_FREEING)
 		goto unqueue;
-	spin_unlock(&lockres->l_lock);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
 
 	status = lockres->l_ops->unblock(lockres, &requeue);
 	if (status < 0)
 		mlog_errno(status);
 
-	spin_lock(&lockres->l_lock);
+	spin_lock_irqsave(&lockres->l_lock, flags);
 unqueue:
 	if (lockres->l_flags & OCFS2_LOCK_FREEING || !requeue) {
 		lockres_clear_flags(lockres, OCFS2_LOCK_QUEUED);
@@ -2586,7 +2604,7 @@
 
 	mlog(0, "lockres %s, requeue = %s.\n", lockres->l_name,
 	     requeue ? "yes" : "no");
-	spin_unlock(&lockres->l_lock);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
 
 	mlog_exit_void();
 }
@@ -2625,14 +2643,15 @@
 {
 	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_meta_lockres;
 	struct ocfs2_meta_lvb *lvb;
+	unsigned long flags;
 
-	spin_lock(&lockres->l_lock);
+	spin_lock_irqsave(&lockres->l_lock, flags);
 	BUG_ON(lockres->l_level != LKM_EXMODE);
 
 	lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
 	if (be32_to_cpu(lvb->lvb_trunc_clusters) > trunc_clusters)
 		lvb->lvb_trunc_clusters = cpu_to_be32(trunc_clusters);
-	spin_unlock(&lockres->l_lock);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
 }
 
 /* This aids in debugging situations where a bad LVB might be involved. */

Modified: branches/locking-changes/fs/ocfs2/dlmglue.h
===================================================================
--- branches/locking-changes/fs/ocfs2/dlmglue.h	2005-09-28 01:02:10 UTC (rev 2624)
+++ branches/locking-changes/fs/ocfs2/dlmglue.h	2005-09-28 01:31:02 UTC (rev 2625)
@@ -101,7 +101,7 @@
 			 ocfs2_journal_handle *handle,
 			 struct buffer_head **ret_bh,
 			 int ex,
-			 int flags);
+			 int arg_flags);
 /* 99% of the time we don't want to supply any additional flags --
  * those are for very specific cases only. */
 #define ocfs2_meta_lock(i, h, b, e) ocfs2_meta_lock_full(i, h, b, e, 0)


