[Ocfs2-commits] zab commits r2480 - trunk/fs/ocfs2

svn-commits at oss.oracle.com
Thu Jul 28 13:01:05 CDT 2005


Author: zab
Signed-off-by: mfasheh
Date: 2005-07-28 13:01:03 -0500 (Thu, 28 Jul 2005)
New Revision: 2480

Modified:
   trunk/fs/ocfs2/aio.c
   trunk/fs/ocfs2/dlmglue.c
   trunk/fs/ocfs2/mmap.c
Log:
o return EIOCBRETRY instead of EIOCBQUEUED from dlmglue when an aio op hits a
  blocked lock -- the caller has to retry the op to make more progress;
  dlmglue isn't going to call aio_complete()

Signed-off-by: mfasheh

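The distinction the log draws is about who finishes the iocb.  Below is a
minimal sketch of the two contracts under the 2.6 retry-based aio model of
the time (ki_retry and kick_iocb() are the mainline primitives of that era;
the example_* names are hypothetical stand-ins, not ocfs2 code):

/* The aio core calls the iocb's ki_retry method and interprets the two
 * codes differently:
 *
 *   -EIOCBQUEUED: the lower layer owns the iocb and will call
 *                 aio_complete() itself when the op finishes.
 *   -EIOCBRETRY:  nobody will complete the iocb; the aio core must call
 *                 ki_retry again, typically after kick_iocb() fires from
 *                 a wakeup or callback.
 */
static ssize_t example_ki_retry(struct kiocb *iocb)
{
        ssize_t ret = example_take_cluster_locks(iocb); /* hypothetical */

        if (ret == -EIOCBRETRY)
                return ret;     /* dlmglue will kick us; we get re-called */
        if (ret < 0)
                return ret;     /* hard error, completes the iocb now */

        return example_do_io(iocb);     /* may return -EIOCBQUEUED itself */
}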

Modified: trunk/fs/ocfs2/aio.c
===================================================================
--- trunk/fs/ocfs2/aio.c	2005-07-26 21:51:09 UTC (rev 2479)
+++ trunk/fs/ocfs2/aio.c	2005-07-28 18:01:03 UTC (rev 2480)
@@ -197,12 +197,11 @@
 	return okp;
 }
 
-/* this is a hack until 2.6 gets its story straight regarding bubbling up
- * EIOCBQUEUED and the like.  in mainline we'd pass an iocb down and do lots of
- * is_sync() testing.  In suparna's patches the dlm would use waitqueues and
- * the waiting primatives would test current->wait for sync.  until that gets
- * settled we have a very limited async/cb mechanism in the dlm and have it
- * call this which triggers a retry. */
+/* The DLM supports a minimal notion of AIO lock acquisition.  Instead of
+ * testing the iocb or current->, as kernel fs/block paths tend to, it
+ * takes an explicit callback which it calls when a lock state attempt
+ * makes forward progress.  It would be better if it worked with the
+ * native kernel AIO mechanics. */
 static void ocfs2_aio_kick(int status, unsigned long data)
 {
 	struct kiocb *iocb = (struct kiocb *)data;
@@ -263,13 +262,13 @@
 		target_binode->ba_lock_data = filp->f_flags & O_DIRECT ? 0 : 1;
 	}
 
-	/* this might return EIOCBQUEUED and we'll come back again to
+	/* this might return EIOCBRETRY and we'll come back again to
 	 * continue the locking.  It's harmless to call it once it has
 	 * returned success.. */
 	okp->kp_info.wl_unlock_ctxt = 1; /* re-use the write info path */
 	ret = ocfs2_lock_buffer_inodes(&okp->kp_ctxt, NULL);
 	if (ret < 0) {
-		if (ret != -EIOCBQUEUED)
+		if (ret != -EIOCBRETRY)
 			mlog_errno(ret);
 		goto setmask;
 	}
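
ocfs2_aio_kick(), visible in the context above, is the explicit callback the
rewritten comment describes.  A sketch of the pattern, assuming the 2.6
kick_iocb() primitive (the body below is illustrative, not the committed
implementation):

/* When a blocked lock makes forward progress, the dlm invokes the
 * callback registered for the iocb; the callback just asks the aio
 * core to run the iocb's ki_retry method again. */
static void example_aio_kick(int status, unsigned long data)
{
        struct kiocb *iocb = (struct kiocb *)data;

        kick_iocb(iocb);        /* resubmit; ki_retry will run again */
}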

Modified: trunk/fs/ocfs2/dlmglue.c
===================================================================
--- trunk/fs/ocfs2/dlmglue.c	2005-07-26 21:51:09 UTC (rev 2479)
+++ trunk/fs/ocfs2/dlmglue.c	2005-07-28 18:01:03 UTC (rev 2480)
@@ -978,7 +978,7 @@
 		/* is someone sitting in dlm_lock? If so, wait on
 		 * them. */
 		lockres_add_flag_callback(lockres, fcb, OCFS2_LOCK_BUSY, 0);
-		ret = -EIOCBQUEUED;
+		ret = -EIOCBRETRY;
 		goto unlock;
 	}
 
@@ -999,7 +999,7 @@
 		/* is the lock is currently blocked on behalf of
 		 * another node */
 		lockres_add_flag_callback(lockres, fcb, OCFS2_LOCK_BLOCKED, 0);
-		ret = -EIOCBQUEUED;
+		ret = -EIOCBRETRY;
 		goto unlock;
 	}
 
@@ -1062,7 +1062,7 @@
 	/* Non-async callers will always wait here for dlm operations
 	 * to complete. We must be careful to re-initialize the
 	 * completion before looping back. */
-	if (ret == -EIOCBQUEUED && sync) {
+	if (ret == -EIOCBRETRY && sync) {
 		wait_for_completion(&sc.sc_complete);
 		ret = sc.sc_status;
 		if (ret == 0) {
@@ -1072,7 +1072,7 @@
 	}
 
 	/* Only free the async fcb on error. */
-	if (ret && ret != -EIOCBQUEUED && !sync) {
+	if (ret && ret != -EIOCBRETRY && !sync) {
 		mlog_bug_on_msg(!list_empty(&fcb->fc_lockres_item),
 				"Lockres %s, freeing flag callback in use\n",
 				lockres->l_name);
@@ -1622,7 +1622,7 @@
 	status = ocfs2_cluster_lock(osb, lockres, level, dlm_flags, cb,
 				    cb_data);
 	if (status < 0) {
-		if (status != -EAGAIN && status != -EIOCBQUEUED)
+		if (status != -EAGAIN && status != -EIOCBRETRY)
 			mlog_errno(status);
 		goto bail;
 	}
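
The hunk against line 1062 shows how synchronous callers flatten the retry
protocol back into a blocking call.  A hedged sketch of that loop shape (the
sc_ field names follow the diff; example_try_lock() is a hypothetical stand-in
for the lock attempt):

/* Wait for the flag callback to fire the completion, pick up the
 * status it recorded, and re-arm the completion before looping back
 * for another attempt, as the comment in the diff warns. */
while ((ret = example_try_lock(lockres, &sc)) == -EIOCBRETRY) {
        wait_for_completion(&sc.sc_complete);
        ret = sc.sc_status;
        if (ret)
                break;
        init_completion(&sc.sc_complete);       /* re-init before retrying */
}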

Modified: trunk/fs/ocfs2/mmap.c
===================================================================
--- trunk/fs/ocfs2/mmap.c	2005-07-26 21:51:09 UTC (rev 2479)
+++ trunk/fs/ocfs2/mmap.c	2005-07-28 18:01:03 UTC (rev 2480)
@@ -334,7 +334,7 @@
 	 * that we've already seen.  If we're moving from one we've locked
 	 * to one we haven't then we mark this node in the ctxt so that
 	 * we'll return to it in a future after, say, hitting last_inode
-	 * or EIOCBQUEUED in lock_buffer_inodes */
+	 * or EIOCBRETRY in lock_buffer_inodes */
 	if (pos && pos->ba_locked && binode)
 		ctxt->b_next_unlocked = binode;
 
@@ -369,7 +369,7 @@
 						      ctxt->b_cb_data);
 
 			if (status < 0) {
-				if (status != -EIOCBQUEUED)
+				if (status != -EIOCBRETRY)
 					mlog_errno(status);
 				goto bail;
 			}
@@ -382,7 +382,7 @@
 			data_level = binode->ba_lock_data_level;
 			status = ocfs2_data_lock(inode, data_level);
 			if (status < 0) {
-				if (status == -EIOCBQUEUED)
+				if (status == -EIOCBRETRY)
 					goto bail;
 
 				/* clean up the metadata lock that we took
@@ -439,7 +439,7 @@
  * file writes and AIO writes come in through here.  This function does no
  * teardown on its own.  The caller must examine the info struct to see if it
  * needs to release locks or i_sem, etc.  This function is also restartable in
- * that it can return EIOCBQUEUED if it would have blocked in the dlm.  It
+ * that it can return EIOCBRETRY if it would have blocked in the dlm.  It
  * stores its partial progress in the info struct so the caller can call back
  * in when it thinks the dlm won't block any more.  Thus, the caller must zero
  * the info struct before calling in the first time.
@@ -471,7 +471,7 @@
 					   count, ctxt,
 					   &info->wl_target_binode);
 		if (ret < 0) {
-			BUG_ON(ret == -EIOCBQUEUED);
+			BUG_ON(ret == -EIOCBRETRY);
 			mlog_errno(ret);
 			goto bail;
 		}
@@ -483,7 +483,7 @@
 		info->wl_unlock_ctxt = 1;
 		ret = ocfs2_lock_buffer_inodes(ctxt, inode);
 		if (ret < 0) {
-			if (ret != -EIOCBQUEUED)
+			if (ret != -EIOCBRETRY)
 				mlog_errno(ret);
 			goto bail;
 		}
@@ -642,7 +642,7 @@
 	/* This will lock everyone who's order puts them *after* our inode. */
 	ret = ocfs2_lock_buffer_inodes(ctxt, NULL);
 	if (ret < 0) {
-		if (ret != -EIOCBQUEUED)
+		if (ret != -EIOCBRETRY)
 			mlog_errno(ret);
 		goto bail;
 	}
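
The block comment in the earlier mmap.c hunk describes a restartable entry
point.  A sketch of the calling convention it implies (the function and struct
names here are hypothetical; only the zero-before-first-call and
retry-on-EIOCBRETRY rules come from the comment):

/* Zero the info struct once, then call back in as often as needed;
 * partial locking progress survives across calls that return
 * -EIOCBRETRY instead of blocking in the dlm. */
struct example_write_info info;
int ret;

memset(&info, 0, sizeof(info));
do {
        ret = example_write_lock(filp, &info);  /* hypothetical name */
        /* a sync caller can simply loop; an aio caller would instead
         * return -EIOCBRETRY and rely on the kick callback */
} while (ret == -EIOCBRETRY);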


