[Ocfs2-commits] zab commits r1986 - in branches/usysfsify/fs: ocfs2/dlm usysfs

svn-commits at oss.oracle.com svn-commits at oss.oracle.com
Wed Mar 16 16:46:42 CST 2005


Author: zab
Date: 2005-03-16 16:46:41 -0600 (Wed, 16 Mar 2005)
New Revision: 1986

Modified:
   branches/usysfsify/fs/ocfs2/dlm/dlmast.c
   branches/usysfsify/fs/ocfs2/dlm/dlmconvert.c
   branches/usysfsify/fs/ocfs2/dlm/dlmlock.c
   branches/usysfsify/fs/ocfs2/dlm/dlmmaster.c
   branches/usysfsify/fs/ocfs2/dlm/dlmmod.c
   branches/usysfsify/fs/ocfs2/dlm/dlmmod.h
   branches/usysfsify/fs/ocfs2/dlm/dlmrecovery.c
   branches/usysfsify/fs/ocfs2/dlm/dlmthread.c
   branches/usysfsify/fs/ocfs2/dlm/dlmunlock.c
   branches/usysfsify/fs/ocfs2/dlm/userdlm.c
   branches/usysfsify/fs/usysfs/mount.c
Log:
o fix silly ukset_init_type_name() typos
                                                                                
o bring dlm/ into the simplified usysfs order.
o forbid new dlm domain registration when a local node isn't registered
o track the local node in dlm->node_num
o s/dlm->group_index/dlm->node_num/
o send messages with a node number instead of a nm inode
o get rid of some group references


Modified: branches/usysfsify/fs/ocfs2/dlm/dlmast.c
===================================================================
--- branches/usysfsify/fs/ocfs2/dlm/dlmast.c	2005-03-16 21:30:19 UTC (rev 1985)
+++ branches/usysfsify/fs/ocfs2/dlm/dlmast.c	2005-03-16 22:46:41 UTC (rev 1986)
@@ -100,18 +100,18 @@
 	DLM_ASSERT(lksb);
 
 	/* only updates if this node masters the lockres */
-	if (res->owner == dlm->group_index) {
+	if (res->owner == dlm->node_num) {
 
 		spin_lock(&res->spinlock);
 		/* check the lksb flags for the direction */
 		if (lksb->flags & DLM_LKSB_GET_LVB) {
 			dlmprintk("getting lvb from lockres for %s node\n",
-				  lock->ml.node == dlm->group_index ? "master" :
+				  lock->ml.node == dlm->node_num ? "master" :
 				  "remote");
 			memcpy(lksb->lvb, res->lvb, DLM_LVB_LEN);
 		} else if (lksb->flags & DLM_LKSB_PUT_LVB) {
 			dlmprintk("setting lvb from lockres for %s node\n",
-				  lock->ml.node == dlm->group_index ? "master" :
+				  lock->ml.node == dlm->node_num ? "master" :
 				  "remote");
 			memcpy(res->lvb, lksb->lvb, DLM_LVB_LEN);
 		}
@@ -135,7 +135,7 @@
 	DLM_ASSERT(lksb);
 	fn = lock->ast;
 	DLM_ASSERT(fn);
-	DLM_ASSERT(lock->ml.node == dlm->group_index);
+	DLM_ASSERT(lock->ml.node == dlm->node_num);
 
 	dlm_update_lvb(dlm, res, lock);
 	(*fn)(lock->astdata);
@@ -154,7 +154,7 @@
 	DLM_ASSERT(res);
 	lksb = lock->lksb;
 	DLM_ASSERT(lksb);
-	DLM_ASSERT(lock->ml.node != dlm->group_index);
+	DLM_ASSERT(lock->ml.node != dlm->node_num);
 
 	ret = 0;
 	dlm_update_lvb(dlm, res, lock);
@@ -171,7 +171,7 @@
 	dlm_bastlockfunc_t *fn = lock->bast;
 	dlmprintk0("\n");
 
-	DLM_ASSERT(lock->ml.node == dlm->group_index);
+	DLM_ASSERT(lock->ml.node == dlm->node_num);
 	DLM_ASSERT(fn);
 	
 	(*fn)(lock->astdata, blocked_type);
@@ -239,7 +239,7 @@
 	}
 
 	/* cannot get a proxy ast message if this node owns it */
-	DLM_ASSERT(res->owner != dlm->group_index);
+	DLM_ASSERT(res->owner != dlm->node_num);
 
 	dlmprintk("lockres %.*s\n", res->lockname.len, res->lockname.name);
 	if (!dlm_is_recovery_lock(past->name, past->namelen))
@@ -326,7 +326,6 @@
 {
 	int ret = 0;
 	dlm_proxy_ast past;
-	struct inode *inode = NULL;
 	struct iovec iov[2];
 	size_t iovlen = 1;
 
@@ -335,7 +334,7 @@
 		  msg_type, blocked_type);
 
 	memset(&past, 0, sizeof(dlm_proxy_ast));
-	past.node_idx = dlm->group_index;
+	past.node_idx = dlm->node_num;
 	past.type = msg_type;
 	past.blocked_type = blocked_type;
 	past.namelen = res->lockname.len;
@@ -352,14 +351,9 @@
 		iovlen++;
 	}
 
-	ret = -EINVAL;
-	inode = nm_get_group_node_by_index(dlm->group, lock->ml.node);
-	if (inode) {
-		dlm_proxy_ast_to_net(&past);
-		ret = net_send_message_iov(DLM_PROXY_AST_MSG, dlm->key, 
-					   iov, iovlen, inode, NULL);
-		iput(inode);
-	}
+	dlm_proxy_ast_to_net(&past);
+	ret = net_send_message_iov(DLM_PROXY_AST_MSG, dlm->key, iov, iovlen,
+				   lock->ml.node, NULL);
 	if (ret < 0)
 		dlmprintk("(%d) dlm_send_proxy_ast: returning %d\n", 
 			  current->pid, ret);

Modified: branches/usysfsify/fs/ocfs2/dlm/dlmconvert.c
===================================================================
--- branches/usysfsify/fs/ocfs2/dlm/dlmconvert.c	2005-03-16 21:30:19 UTC (rev 1985)
+++ branches/usysfsify/fs/ocfs2/dlm/dlmconvert.c	2005-03-16 22:46:41 UTC (rev 1986)
@@ -188,7 +188,7 @@
 		  res->lockname.name, dlm_lock_mode_name(type));
 	/* immediately grant the new lock type */
 	lock->lksb->status = DLM_NORMAL;
-	if (lock->ml.node == dlm->group_index)
+	if (lock->ml.node == dlm->node_num)
 		dlmprintk0("doing in-place convert for nonlocal lock\n");
 	lock->ml.type = type;
 	status = DLM_NORMAL;
@@ -305,7 +305,6 @@
 					   dlm_lock_resource *res, 
 					   dlm_lock *lock, int flags, int type)
 {
-	struct inode *inode = NULL;
 	dlm_convert_lock convert;
 	int tmpret;
 	dlm_status ret;
@@ -316,7 +315,7 @@
 	dlmprintk0("\n");
 
 	memset(&convert, 0, sizeof(dlm_convert_lock));
-	convert.node_idx = dlm->group_index;
+	convert.node_idx = dlm->node_num;
 	convert.requested_type = type;
 	convert.cookie = lock->ml.cookie;
 	convert.namelen = res->lockname.len;
@@ -333,21 +332,16 @@
 		iovlen++;
 	}
 
-	ret = DLM_NOLOCKMGR;
-	inode = nm_get_group_node_by_index(dlm->group, res->owner);
-	if (inode) {
-		dlm_convert_lock_to_net(&convert);
-		tmpret = net_send_message_iov(DLM_CONVERT_LOCK_MSG, dlm->key, 
-					      iov, iovlen, inode, &status);
-		if (tmpret >= 0) {
-			// successfully sent and received
-			ret = status;  // this is already a dlm_status
-		} else {
-			dlmprintk("error occurred in net_send_message: %d\n", 
-				  tmpret);
-			ret = dlm_err_to_dlm_status(tmpret);
-		}
-		iput(inode);
+	dlm_convert_lock_to_net(&convert);
+	tmpret = net_send_message_iov(DLM_CONVERT_LOCK_MSG, dlm->key, 
+				      iov, iovlen, res->owner, &status);
+	if (tmpret >= 0) {
+		// successfully sent and received
+		ret = status;  // this is already a dlm_status
+	} else {
+		dlmprintk("error occurred in net_send_message: %d\n", 
+			  tmpret);
+		ret = dlm_err_to_dlm_status(tmpret);
 	}
 
 	return ret;

Modified: branches/usysfsify/fs/ocfs2/dlm/dlmlock.c
===================================================================
--- branches/usysfsify/fs/ocfs2/dlm/dlmlock.c	2005-03-16 21:30:19 UTC (rev 1985)
+++ branches/usysfsify/fs/ocfs2/dlm/dlmlock.c	2005-03-16 22:46:41 UTC (rev 1986)
@@ -196,7 +196,6 @@
 					       dlm_lock_resource *res, 
 					       dlm_lock *lock, int flags)
 {
-	struct inode *inode = NULL;
 	dlm_create_lock create;
 	int tmpret, status = 0;
 	dlm_status ret;
@@ -204,29 +203,23 @@
 	dlmprintk0("\n");
 
 	memset(&create, 0, sizeof(create));
-	create.node_idx = dlm->group_index;
+	create.node_idx = dlm->node_num;
 	create.requested_type = lock->ml.type;
 	create.cookie = lock->ml.cookie;
 	create.namelen = res->lockname.len;
 	create.flags = flags;
 	strncpy(create.name, res->lockname.name, create.namelen);
 
-	ret = DLM_NOLOCKMGR;
-	inode = nm_get_group_node_by_index(dlm->group, res->owner);
-	if (inode) {
-		dlm_create_lock_to_net(&create);
-		tmpret = net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, 
-					  &create, sizeof(create), 
-					  inode, &status);
-		if (tmpret >= 0) {
-			// successfully sent and received
-			ret = status;  // this is already a dlm_status
-		} else {
-			dlmprintk("error occurred in net_send_message: %d\n", 
-				  tmpret);
-			ret = dlm_err_to_dlm_status(tmpret);
-		}
-		iput(inode);
+	dlm_create_lock_to_net(&create);
+	tmpret = net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create,
+				  sizeof(create), res->owner, &status);
+	if (tmpret >= 0) {
+		// successfully sent and received
+		ret = status;  // this is already a dlm_status
+	} else {
+		dlmprintk("error occurred in net_send_message: %d\n", 
+			  tmpret);
+		ret = dlm_err_to_dlm_status(tmpret);
 	}
 
 	return ret;

Modified: branches/usysfsify/fs/ocfs2/dlm/dlmmaster.c
===================================================================
--- branches/usysfsify/fs/ocfs2/dlm/dlmmaster.c	2005-03-16 21:30:19 UTC (rev 1985)
+++ branches/usysfsify/fs/ocfs2/dlm/dlmmaster.c	2005-03-16 22:46:41 UTC (rev 1986)
@@ -242,8 +242,8 @@
 	/* copy off the node_map and register hb callbacks on our copy */
 	memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
 	memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
-	clear_bit(dlm->group_index, mle->vote_map);
-	clear_bit(dlm->group_index, mle->node_map);
+	clear_bit(dlm->node_num, mle->vote_map);
+	clear_bit(dlm->node_num, mle->node_map);
 
 	/* attach the mle to the domain node up/down events */
 	__dlm_mle_attach_hb_events(dlm, mle);
@@ -272,14 +272,11 @@
 
 
 void dlm_mle_node_down(dlm_ctxt *dlm, dlm_master_list_entry *mle,
-		       struct inode *group, struct inode *node, int idx)
+		       struct nm_node *node, int idx)
 {
 	DLM_ASSERT(mle);
 	DLM_ASSERT(dlm);
 
-	if (dlm->group != group)
-		return;
-
 	spin_lock(&mle->spinlock);
 
 	if (!test_bit(idx, mle->node_map))
@@ -297,14 +294,11 @@
 }
 
 void dlm_mle_node_up(dlm_ctxt *dlm, dlm_master_list_entry *mle,
-		       struct inode *group, struct inode *node, int idx)
+		     struct nm_node *node, int idx)
 {
 	DLM_ASSERT(mle);
 	DLM_ASSERT(dlm);
 
-	if (dlm->group != group)
-		return;
-
 	spin_lock(&mle->spinlock);
 
 #if 0	
@@ -364,7 +358,7 @@
 {
 	assert_spin_locked(&res->spinlock);
 
-	if (owner == dlm->group_index)
+	if (owner == dlm->node_num)
 		atomic_inc(&dlm->local_resources);
 	else if (owner == DLM_LOCK_RES_OWNER_UNKNOWN)
 		atomic_inc(&dlm->unknown_resources);
@@ -383,7 +377,7 @@
 	if (owner == res->owner)
 		return;
 
-	if (res->owner == dlm->group_index)
+	if (res->owner == dlm->node_num)
 		atomic_dec(&dlm->local_resources);
 	else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN)
 		atomic_dec(&dlm->unknown_resources);
@@ -589,7 +583,7 @@
 		/* caller knows it's safe to assume it's not mastered elsewhere
 		 * DONE!  return right away */
 		spin_lock(&res->spinlock);
-		dlm_change_lockres_owner(dlm, res, dlm->group_index);
+		dlm_change_lockres_owner(dlm, res, dlm->node_num);
 		__dlm_insert_lock(dlm, res);
 		spin_unlock(&res->spinlock);
 		spin_unlock(&dlm->spinlock);
@@ -729,11 +723,11 @@
 		/* have all nodes responded? */
 		if (voting_done) {
 			bit = find_next_bit(mle->maybe_map, NM_MAX_NODES, 0);
-			if (dlm->group_index <= bit) {
+			if (dlm->node_num <= bit) {
 				/* my node number is lowest.
 			 	 * now tell other nodes that I am 
 				 * mastering this. */
-				mle->master = dlm->group_index;
+				mle->master = dlm->node_num;
 				assert = 1;
 				sleep = 0;
 			}
@@ -765,7 +759,7 @@
 
 	ret = 0;   /* done */	
 	if (assert) {
-		m = dlm->group_index;
+		m = dlm->node_num;
 		ret = dlm_do_assert_master(dlm, res->lockname.name, 
 					   res->lockname.len, mle->vote_map);
 		if (ret) {
@@ -808,13 +802,12 @@
 
 static int dlm_do_master_request(dlm_master_list_entry *mle, int to)
 {
-	struct inode *inode = NULL;
 	dlm_ctxt *dlm = mle->dlm;
 	dlm_master_request request;
 	int ret, response=0;
 
 	memset(&request, 0, sizeof(request));
-	request.node_idx = dlm->group_index;
+	request.node_idx = dlm->node_num;
 	if (mle->type == DLM_MLE_BLOCK) {
 		request.namelen = mle->u.name.len;
 		strncpy(request.name, mle->u.name.name, request.namelen);
@@ -824,53 +817,47 @@
 			request.namelen);
 	}
 
-	ret = -EINVAL;
-	inode = nm_get_group_node_by_index(dlm->group, to);
-	if (inode) {
-		dlm_master_request_to_net(&request);
-		ret = net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, 
-				       &request, sizeof(request), 
-				       inode, &response);
-		iput(inode);
-		if (ret >= 0) {
-			spin_lock(&mle->spinlock);
-			switch (response) {
-				case DLM_MASTER_RESP_YES:
-					set_bit(to, mle->response_map);
-					// dlmprintk("woot!  node %u is the "
-					// "master!\n", to);
-					mle->master = to;
-					break;
-				case DLM_MASTER_RESP_NO:
-					// dlmprintk("node %u is not the "
-					// "master, not in-progress\n", to);
-					set_bit(to, mle->response_map);
-					break;
-				case DLM_MASTER_RESP_MAYBE:
-					// dlmprintk("node %u is not the "
-					// "master, but IS in-progress\n", to);
-					set_bit(to, mle->response_map);
-					set_bit(to, mle->maybe_map);
-					break;
-				case DLM_MASTER_RESP_ERROR:
-					dlmprintk("node %u hit an -ENOMEM!  "
-						  "try everything again\n", to);
-					mle->error = 1;
-					break;
-				default:
-					dlmprintk("bad response! %u\n", 
-						  response);
-					ret = -EINVAL;
-					break;
-			}
-			spin_unlock(&mle->spinlock);
-		} else {
-			dlmprintk("net_send_message returned %d!\n", ret);
-		}
-	} else {
-		dlmprintk("nm_get_group_node_by_index failed to find inode "
-			  "for node %d!\n", to);
-	}	
+	dlm_master_request_to_net(&request);
+	ret = net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
+			       sizeof(request), to, &response);
+	if (ret < 0)  {
+		dlmprintk("net_send_message returned %d!\n", ret);
+		goto out;
+	}
+
+	spin_lock(&mle->spinlock);
+	switch (response) {
+		case DLM_MASTER_RESP_YES:
+			set_bit(to, mle->response_map);
+			// dlmprintk("woot!  node %u is the "
+			// "master!\n", to);
+			mle->master = to;
+			break;
+		case DLM_MASTER_RESP_NO:
+			// dlmprintk("node %u is not the "
+			// "master, not in-progress\n", to);
+			set_bit(to, mle->response_map);
+			break;
+		case DLM_MASTER_RESP_MAYBE:
+			// dlmprintk("node %u is not the "
+			// "master, but IS in-progress\n", to);
+			set_bit(to, mle->response_map);
+			set_bit(to, mle->maybe_map);
+			break;
+		case DLM_MASTER_RESP_ERROR:
+			dlmprintk("node %u hit an -ENOMEM!  "
+				  "try everything again\n", to);
+			mle->error = 1;
+			break;
+		default:
+			dlmprintk("bad response! %u\n", 
+				  response);
+			ret = -EINVAL;
+			break;
+	}
+	spin_unlock(&mle->spinlock);
+
+out:
 	return ret;
 }
 
@@ -917,16 +904,16 @@
 		 * the node that called us */
 		memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
 		clear_bit(request->node_idx, nodemap);
-		clear_bit(dlm->group_index, nodemap);
+		clear_bit(dlm->node_num, nodemap);
 		while ((bit = find_next_bit(nodemap, NM_MAX_NODES,
-				    dlm->group_index)) < NM_MAX_NODES) {
+				    dlm->node_num)) < NM_MAX_NODES) {
 			clear_bit(bit, nodemap);
 		}
 		spin_unlock(&dlm->spinlock);
 
 		/* take care of the easy cases up front */
 		spin_lock(&res->spinlock);
-		if (res->owner == dlm->group_index) {
+		if (res->owner == dlm->node_num) {
 			spin_unlock(&res->spinlock);
 			// dlmprintk0("this node is the master\n");
 			response = DLM_MASTER_RESP_YES;
@@ -1061,7 +1048,6 @@
 int dlm_do_assert_master(dlm_ctxt *dlm, const char *lockname, 
 			 unsigned int namelen, void *nodemap)
 {
-	struct inode *inode = NULL;
 	dlm_assert_master assert;
 	int to, tmpret;
 	dlm_node_iter iter;
@@ -1077,24 +1063,13 @@
 	while ((to = dlm_node_iter_next(&iter)) >= 0) {
 		// dlmprintk("sending assert master to %d\n", to);
 		memset(&assert, 0, sizeof(assert));
-		assert.node_idx = dlm->group_index;
+		assert.node_idx = dlm->node_num;
 		assert.namelen = namelen;
 		strncpy(assert.name, lockname, namelen);
 
-		inode = nm_get_group_node_by_index(dlm->group, to);
-		if (!inode) {
-			tmpret = -EINVAL;
-			dlmprintk("could not get nm info for node %d!  "
-				  "need to retry this whole thing\n", to);
-			ret = tmpret;
-			break;
-		}
-
 		dlm_assert_master_to_net(&assert);
 		tmpret = net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key, 
-					  &assert, sizeof(assert), inode, NULL);
-		iput(inode);
-
+					  &assert, sizeof(assert), to, NULL);
 		if (tmpret < 0) {
 			// TODO
 			// dlmprintk("assert_master returned %d!\n", tmpret);

Modified: branches/usysfsify/fs/ocfs2/dlm/dlmmod.c
===================================================================
--- branches/usysfsify/fs/ocfs2/dlm/dlmmod.c	2005-03-16 21:30:19 UTC (rev 1985)
+++ branches/usysfsify/fs/ocfs2/dlm/dlmmod.c	2005-03-16 22:46:41 UTC (rev 1986)
@@ -78,7 +78,6 @@
 LIST_HEAD(dlm_domains);
 spinlock_t dlm_domain_lock = SPIN_LOCK_UNLOCKED;
 DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events);
-u8 dlm_global_index = NM_MAX_NODES;
 static spinlock_t dlm_cookie_lock = SPIN_LOCK_UNLOCKED;
 static u64 dlm_next_cookie = 1;
 
@@ -192,10 +191,6 @@
 	if (status < 0)
 		return -1;
 
-	dlm_global_index = nm_this_node(NULL);
-	if (dlm_global_index == NM_MAX_NODES)
-		return -1;
-
 	status = dlm_register_net_handlers();
 	if (status)
 		return -1;
@@ -303,7 +298,7 @@
 		res = dlm_lockres_grab(dlm, lock->lockres);
 		down_read(&dlm->recovery_sem);
 
-		if (res->owner == dlm->group_index)
+		if (res->owner == dlm->node_num)
 			status = dlmconvert_master(dlm, res, lock, flags, mode);
 		else 
 			status = dlmconvert_remote(dlm, res, lock, flags, mode);
@@ -346,7 +341,7 @@
 		lock->ml.type = mode;
 		lock->ml.convert_type = LKM_IVMODE;
 		lock->ml.highest_blocked = LKM_IVMODE;
-		lock->ml.node = dlm->group_index;
+		lock->ml.node = dlm->node_num;
 		lock->ast = ast;
 		lock->bast = bast;
 		lock->astdata = data;
@@ -367,7 +362,7 @@
 			}
 		}
 
-		if (res->owner == dlm->group_index)
+		if (res->owner == dlm->node_num)
 			status = dlmlock_master(dlm, res, lock, flags);
 		else 
 			status = dlmlock_remote(dlm, res, lock, flags);
@@ -435,7 +430,7 @@
 	DLM_ASSERT(res);
 	dlmprintk("lock=%p res=%p\n", lock, res);
 
-	if (res->owner == dlm->group_index) {
+	if (res->owner == dlm->node_num) {
 		status = dlmunlock_master(dlm, res, lock, lksb, flags, 
 					  &call_ast);
 		dlmprintk("done calling dlmunlock_master: returned %d, "
@@ -593,9 +588,6 @@
 	if (dlm->name)
 		kfree(dlm->name);
 
-	if (dlm->group)
-		iput(dlm->group);
-
 	kfree(dlm);
 }
 
@@ -853,25 +845,16 @@
 				    unsigned int node)
 {
 	int status;
-	struct inode *node_inode;
 	dlm_cancel_join cancel_msg;
 
-	node_inode = nm_get_group_node_by_index(dlm->group, node);
-	if (!node_inode) {
-		status = -EINVAL;
-		dlmprintk("Could not get inode for node %u!\n", node);
-		goto bail;
-	}
-
 	memset(&cancel_msg, 0, sizeof(cancel_msg));
-	cancel_msg.node_idx = dlm->group_index;
+	cancel_msg.node_idx = dlm->node_num;
 	cancel_msg.name_len = strlen(dlm->name);
 	strncpy(cancel_msg.domain, dlm->name, cancel_msg.name_len);
 
 	status = net_send_message(DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY,
-				  &cancel_msg, sizeof(cancel_msg), node_inode,
+				  &cancel_msg, sizeof(cancel_msg), node,
 				  NULL);
-	iput(node_inode);
 	if (status < 0) {
 		dlmprintk("net_send_message returned %d!\n", status);
 		goto bail;
@@ -899,7 +882,7 @@
 		if (node >= NM_MAX_NODES)
 			break;
 
-		if (node == dlm->group_index)
+		if (node == dlm->node_num)
 			continue;
 
 		tmpstat = dlm_send_one_join_cancel(dlm, node);
@@ -920,27 +903,18 @@
 {
 	int status, retval;
 	dlm_query_join_request join_msg;
-	struct inode *node_inode;
 
 	dlmprintk("querying node %d\n", node);
 
-	node_inode = nm_get_group_node_by_index(dlm->group, node);
-	if (!node_inode) {
-		status = -EINVAL;
-		dlmprintk("Could not get inode for node %u!\n", node);
-		goto bail;
-	}
-
 	memset(&join_msg, 0, sizeof(join_msg));
-	join_msg.node_idx = dlm->group_index;
+	join_msg.node_idx = dlm->node_num;
 	join_msg.name_len = strlen(dlm->name);
 	strncpy(join_msg.domain, dlm->name, join_msg.name_len);
 
 	dlm_query_join_request_to_net(&join_msg);
 
 	status = net_send_message(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, &join_msg,
-				  sizeof(join_msg), node_inode, &retval);
-	iput(node_inode);
+				  sizeof(join_msg), node, &retval);
 	if (status < 0 && status != -ENOPROTOOPT && status != -ENOTCONN) {
 		dlmprintk("net_send_message returned %d!\n", status);
 		goto bail;
@@ -977,33 +951,22 @@
 				    unsigned int node)
 {
 	int status;
-	struct inode *node_inode;
 	dlm_assert_joined assert_msg;
 
 	dlmprintk("Sending join assert to node %u\n", node);
 
-	node_inode = nm_get_group_node_by_index(dlm->group, node);
-	if (!node_inode) {
-		status = -EINVAL;
-		dlmprintk("Could not get inode for node %u!\n", node);
-		goto bail;
-	}
-
 	memset(&assert_msg, 0, sizeof(assert_msg));
-	assert_msg.node_idx = dlm->group_index;
+	assert_msg.node_idx = dlm->node_num;
 	assert_msg.name_len = strlen(dlm->name);
 	strncpy(assert_msg.domain, dlm->name, assert_msg.name_len);
 
 	dlm_assert_joined_to_net(&assert_msg);
 
 	status = net_send_message(DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY,
-				  &assert_msg, sizeof(assert_msg), node_inode,
-				  NULL);
-	iput(node_inode);
+				  &assert_msg, sizeof(assert_msg), node, NULL);
 	if (status < 0)
 		dlmprintk("net_send_message returned %d!\n", status);
 
-bail:
 	return status;
 }
 
@@ -1019,7 +982,7 @@
 		if (node >= NM_MAX_NODES)
 			break;
 
-		if (node == dlm->group_index)
+		if (node == dlm->node_num)
 			continue;
 
 		do {
@@ -1075,7 +1038,7 @@
 
 static int dlm_try_to_join_domain(dlm_ctxt *dlm)
 {
-	int status, tmpstat, node;
+	int status = 0, tmpstat, node;
 	struct domain_join_ctxt *ctxt;
 	enum dlm_query_join_response response;
 
@@ -1090,17 +1053,12 @@
 	/* group sem locking should work for us here -- we're already
 	 * registered for heartbeat events so filling this should be
 	 * atomic wrt getting those handlers called. */
-	status = hb_fill_node_map(dlm->live_nodes_map,
-				  sizeof(dlm->live_nodes_map));
-	if (status < 0) {
-		dlmprintk("I couldn't fill my node map!\n");
-		goto bail;
-	}
+	hb_fill_node_map(dlm->live_nodes_map, sizeof(dlm->live_nodes_map));
 
 	spin_lock(&dlm->spinlock);
 	memcpy(ctxt->live_map, dlm->live_nodes_map, sizeof(ctxt->live_map));
 
-	dlm->joining_node = dlm->group_index;
+	dlm->joining_node = dlm->node_num;
 
 	spin_unlock(&dlm->spinlock);
 
@@ -1110,7 +1068,7 @@
 		if (node >= NM_MAX_NODES)
 			break;
 
-		if (node == dlm->group_index)
+		if (node == dlm->node_num)
 			continue;
 
 		status = dlm_request_join(dlm, node, &response);
@@ -1139,7 +1097,7 @@
 	spin_lock(&dlm->spinlock);
 	memcpy(dlm->domain_map, ctxt->yes_resp_map,
 	       sizeof(ctxt->yes_resp_map));
-	set_bit(dlm->group_index, dlm->domain_map);
+	set_bit(dlm->node_num, dlm->domain_map);
 	spin_unlock(&dlm->spinlock);
 
 	dlm_send_join_asserts(dlm, ctxt->yes_resp_map);
@@ -1319,6 +1277,7 @@
 
 	strcpy(dlm->name, domain);
 	dlm->key = key;
+	dlm->node_num = nm_this_node();
 
 	spin_lock_init(&dlm->spinlock);
 	spin_lock_init(&dlm->master_lock);
@@ -1369,6 +1328,11 @@
 		goto leave;
 	}
 
+	if (nm_this_node() == NM_MAX_NODES) {
+		dlmprintk0("a local node has not been configured\n");
+		goto leave;
+	}
+
 	dlmprintk("register called for domain \"%s\"\n", domain);
 
 retry:
@@ -1473,8 +1437,8 @@
 	struct list_head *bucket;
 	int i;
 
-	printk("dlm_ctxt: %s, group=%u, key=%u\n", 
-		  dlm->name, dlm->group_index, dlm->key);
+	printk("dlm_ctxt: %s, node=%u, key=%u\n", 
+		  dlm->name, dlm->node_num, dlm->key);
 	printk("some bug here... should not have to check for this...\n");
 	if (!dlm || !dlm->name) {
 		printk("wtf... dlm=%p\n", dlm);

Modified: branches/usysfsify/fs/ocfs2/dlm/dlmmod.h
===================================================================
--- branches/usysfsify/fs/ocfs2/dlm/dlmmod.h	2005-03-16 21:30:19 UTC (rev 1985)
+++ branches/usysfsify/fs/ocfs2/dlm/dlmmod.h	2005-03-16 22:46:41 UTC (rev 1986)
@@ -212,6 +212,7 @@
 	spinlock_t spinlock;
 	struct rw_semaphore recovery_sem;
 	char *name;
+	u8 node_num;
 	u32 key;
 	u8  joining_node;
 	unsigned long live_nodes_map[BITS_TO_LONGS(NM_MAX_NODES)];

Modified: branches/usysfsify/fs/ocfs2/dlm/dlmrecovery.c
===================================================================
--- branches/usysfsify/fs/ocfs2/dlm/dlmrecovery.c	2005-03-16 21:30:19 UTC (rev 1985)
+++ branches/usysfsify/fs/ocfs2/dlm/dlmrecovery.c	2005-03-16 22:46:41 UTC (rev 1986)
@@ -83,7 +83,7 @@
 				res->state |= DLM_LOCK_RES_RECOVERING;
 				list_del(&res->recovering);
 				list_add_tail(&res->recovering, &dlm->reco.resources);
-			} else if (res->owner == dlm->group_index) {
+			} else if (res->owner == dlm->node_num) {
 				list_for_each_safe(iter2, tmpiter, &res->granted) {
 					lock = list_entry (iter2, dlm_lock, list);
 					if (lock->ml.node == dead_node) {
@@ -113,7 +113,7 @@
 }
 
 
-void dlm_hb_node_down_cb(struct inode *group, struct inode *node, int idx, void *data)
+void dlm_hb_node_down_cb(struct nm_node *node, int idx, void *data)
 {
 	dlm_ctxt *dlm = data;
 	dlm_master_list_entry *mle;
@@ -135,7 +135,7 @@
 	/* notify any mles attached to the heartbeat events */
 	list_for_each(iter, &dlm->mle_hb_events) {
 		mle = list_entry(iter, dlm_master_list_entry, hb_events);
-		dlm_mle_node_down(dlm, mle, group, node, idx);
+		dlm_mle_node_down(dlm, mle, node, idx);
 	}
 
 	if (!test_bit(idx, dlm->domain_map)) {
@@ -161,7 +161,7 @@
 	dlm_put(dlm);
 }
 
-void dlm_hb_node_up_cb(struct inode *group, struct inode *node, int idx, void *data)
+void dlm_hb_node_up_cb(struct nm_node *node, int idx, void *data)
 {
 	dlm_ctxt *dlm = data;
 	dlm_master_list_entry *mle;
@@ -177,7 +177,7 @@
 	/* notify any mles attached to the heartbeat events */
 	list_for_each(iter, &dlm->mle_hb_events) {
 		mle = list_entry(iter, dlm_master_list_entry, hb_events);
-		dlm_mle_node_up(dlm, mle, group, node, idx);
+		dlm_mle_node_up(dlm, mle, node, idx);
 	}
 
 	spin_unlock(&dlm->spinlock);

Modified: branches/usysfsify/fs/ocfs2/dlm/dlmthread.c
===================================================================
--- branches/usysfsify/fs/ocfs2/dlm/dlmthread.c	2005-03-16 21:30:19 UTC (rev 1985)
+++ branches/usysfsify/fs/ocfs2/dlm/dlmthread.c	2005-03-16 22:46:41 UTC (rev 1986)
@@ -53,7 +53,6 @@
 
 extern spinlock_t dlm_domain_lock;
 extern struct list_head dlm_domains;
-extern u8 dlm_global_index;
 
 static int dlm_thread(void *data);
 struct task_struct *dlm_thread_task;
@@ -118,7 +117,7 @@
 	/* Since we can't migrate locks yet, for now we only handle
 	 * non locally mastered locks. */
 	spin_lock(&lockres->spinlock);
-	master = lockres->owner == dlm->group_index;
+	master = lockres->owner == dlm->node_num;
 	spin_unlock(&lockres->spinlock);
 
 	dlmprintk("purging lockres %.*s, master = %d\n", lockres->lockname.len,
@@ -184,9 +183,8 @@
 void dlm_shuffle_lists(dlm_ctxt *dlm, dlm_lock_resource *res)
 {
 	dlm_lock *lock, *target;
-	struct list_head *iter, *tmpiter;
+	struct list_head *iter;
 	struct list_head *head;
-	s8 hi;
 	int can_grant = 1;
 
 	dlmprintk("shuffle res %.*s\n", res->lockname.len, res->lockname.name);
@@ -328,7 +326,7 @@
 		spin_lock(&res->spinlock);
 
 		/* don't shuffle secondary queues */
-		if ((res->owner == dlm->group_index) &&
+		if ((res->owner == dlm->node_num) &&
 		    !(res->state & DLM_LOCK_RES_DIRTY)) {
 			list_add_tail(&res->dirty, &dlm->dirty_list);
 			res->state |= DLM_LOCK_RES_DIRTY;
@@ -399,7 +397,7 @@
 		dlmprintk0("delivering an ast for this lockres\n");
 
 		list_del_init(&lock->ast_list);
-		if (lock->ml.node != dlm->group_index) {
+		if (lock->ml.node != dlm->node_num) {
 			if (dlm_do_remote_ast(dlm, res, lock) < 0)
 				dlmprintk("eek\n");
 		} else
@@ -421,7 +419,7 @@
 
 		dlmprintk("delivering a bast for this lockres "
 			  "(blocked = %d\n", hi);
-		if (lock->ml.node != dlm->group_index) {
+		if (lock->ml.node != dlm->node_num) {
 			if (dlm_send_proxy_bast(dlm, res, lock, hi) < 0)
 				dlmprintk0("eeek\n");
 		} else
@@ -452,7 +450,7 @@
 			spin_lock(&res->spinlock);
 			list_del_init(&res->dirty);
 			res->state &= ~DLM_LOCK_RES_DIRTY;
-			BUG_ON(res->owner != dlm->group_index);
+			BUG_ON(res->owner != dlm->node_num);
 			spin_unlock(&res->spinlock);
 
 			dlm_shuffle_lists(dlm, res);

Modified: branches/usysfsify/fs/ocfs2/dlm/dlmunlock.c
===================================================================
--- branches/usysfsify/fs/ocfs2/dlm/dlmunlock.c	2005-03-16 21:30:19 UTC (rev 1985)
+++ branches/usysfsify/fs/ocfs2/dlm/dlmunlock.c	2005-03-16 22:46:41 UTC (rev 1986)
@@ -90,9 +90,9 @@
 		  flags & LKM_VALBLK);
 
 	if (master_node)
-		DLM_ASSERT(res->owner == dlm->group_index);
+		DLM_ASSERT(res->owner == dlm->node_num);
 	else
-		DLM_ASSERT(res->owner != dlm->group_index);
+		DLM_ASSERT(res->owner != dlm->node_num);
 
 	spin_lock(&dlm->spinlock);
 	/* We want to be sure that we're not freeing a lock
@@ -201,7 +201,6 @@
 						 dlm_lockstatus *lksb, 
 						 int flags)
 {
-	struct inode *inode = NULL;
 	dlm_unlock_lock unlock;
 	int tmpret;
 	dlm_status ret;
@@ -212,7 +211,7 @@
 	dlmprintk0("\n");
 
 	memset(&unlock, 0, sizeof(unlock));
-	unlock.node_idx = dlm->group_index;
+	unlock.node_idx = dlm->node_num;
 	unlock.flags = flags;
 	unlock.cookie = lock->ml.cookie;
 	unlock.namelen = res->lockname.len;
@@ -228,27 +227,21 @@
 		iovlen++;
 	}
 
-	ret = DLM_NOLOCKMGR;
-	lksb->status = DLM_NOLOCKMGR;
-	inode = nm_get_group_node_by_index(dlm->group, res->owner);
-	if (inode) {
-		dlm_unlock_lock_to_net(&unlock);
-		tmpret = net_send_message_iov(DLM_UNLOCK_LOCK_MSG, dlm->key, 
-					      iov, iovlen, inode, &status);
-		if (tmpret >= 0) {
-			// successfully sent and received
-			if (status == DLM_CANCELGRANT)
-				ret = DLM_NORMAL;
-			else
-				ret = status;
-			lksb->status = status;
-		} else {
-			dlmprintk("error occurred in net_send_message: %d\n",
-				  tmpret);
-			ret = dlm_err_to_dlm_status(tmpret);
-			lksb->status = ret;
-		}
-		iput(inode);
+	dlm_unlock_lock_to_net(&unlock);
+	tmpret = net_send_message_iov(DLM_UNLOCK_LOCK_MSG, dlm->key, 
+				      iov, iovlen, res->owner, &status);
+	if (tmpret >= 0) {
+		// successfully sent and received
+		if (status == DLM_CANCELGRANT)
+			ret = DLM_NORMAL;
+		else
+			ret = status;
+		lksb->status = status;
+	} else {
+		dlmprintk("error occurred in net_send_message: %d\n",
+			  tmpret);
+		ret = dlm_err_to_dlm_status(tmpret);
+		lksb->status = ret;
 	}
 
 	return ret;

Modified: branches/usysfsify/fs/ocfs2/dlm/userdlm.c
===================================================================
--- branches/usysfsify/fs/ocfs2/dlm/userdlm.c	2005-03-16 21:30:19 UTC (rev 1985)
+++ branches/usysfsify/fs/ocfs2/dlm/userdlm.c	2005-03-16 22:46:41 UTC (rev 1986)
@@ -636,7 +636,7 @@
 
 	snprintf(domain, name->len + 1, "%.*s", name->len, name->name);
 
-	dlm = dlm_register_domain(domain, domain, dlm_key);
+	dlm = dlm_register_domain(domain, dlm_key);
 
 	kfree(domain);
 	return dlm;

Modified: branches/usysfsify/fs/usysfs/mount.c
===================================================================
--- branches/usysfsify/fs/usysfs/mount.c	2005-03-16 21:30:19 UTC (rev 1985)
+++ branches/usysfsify/fs/usysfs/mount.c	2005-03-16 22:46:41 UTC (rev 1986)
@@ -68,9 +68,9 @@
 }
 
 void ukset_init_type_name(struct ukset *ukset, const char *name,
-			  struct ktype *ktype)
+			  struct kobj_type *ktype)
 {
-	kobject_set_name(&ukset->kobj, name);
+	kobject_set_name(&ukset->kset.kobj, name);
 	ukset->kset.kobj.ktype = ktype;
 	ukset_init(ukset);
 }



More information about the Ocfs2-commits mailing list