[Ocfs2-commits] zab commits r2080 - trunk/fs/ocfs2/cluster

svn-commits at oss.oracle.com
Wed Mar 30 11:51:19 CST 2005


Author: zab
Date: 2005-03-30 11:51:17 -0600 (Wed, 30 Mar 2005)
New Revision: 2080

Modified:
   trunk/fs/ocfs2/cluster/nodemanager.c
   trunk/fs/ocfs2/cluster/nodemanager.h
Log:
o we don't have rational node lifetimes defined yet.  This at least stops
  people from re-writing node attributes while they're in use and enforces
  the required ordering that we currently rely on in place of committable
  configfs items.  (a rough sketch of the resulting rules follows the log)
o also get rid of some silly printks and bad freeing behaviour while in there

Signed-off-by: mfasheh

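For readers skimming the diff, here is a rough userspace sketch of the
write-once and ordering rules the new nd_set_attributes bitmap enforces.
Only the attribute enum is taken from the patch; fake_node, attr_is_set()
and node_store() are made-up stand-ins, and the -EBUSY and -EINVAL checks
(which in the real code live in nm_node_store() and the num/local _write()
handlers respectively) are folded into one function for brevity:

	#include <errno.h>
	#include <stdio.h>

	/* mirrors the enum added to nodemanager.c */
	enum {
		NM_NODE_ATTR_NUM = 0,
		NM_NODE_ATTR_PORT,
		NM_NODE_ATTR_ADDRESS,
		NM_NODE_ATTR_LOCAL,
	};

	/* stand-in for struct nm_node: just the write-once bookkeeping */
	struct fake_node {
		unsigned long set_attributes;	/* like nd_set_attributes */
	};

	static int attr_is_set(struct fake_node *node, int idx)
	{
		return !!(node->set_attributes & (1UL << idx));
	}

	static int node_store(struct fake_node *node, int idx)
	{
		/* an attribute that has already been written is in use */
		if (attr_is_set(node, idx))
			return -EBUSY;

		/* num can't be set until address and port are in place,
		 * since networking can then look the node up and connect */
		if (idx == NM_NODE_ATTR_NUM &&
		    (!attr_is_set(node, NM_NODE_ATTR_ADDRESS) ||
		     !attr_is_set(node, NM_NODE_ATTR_PORT)))
			return -EINVAL;

		/* local turns on rx, so it requires everything else first */
		if (idx == NM_NODE_ATTR_LOCAL &&
		    (!attr_is_set(node, NM_NODE_ATTR_ADDRESS) ||
		     !attr_is_set(node, NM_NODE_ATTR_NUM) ||
		     !attr_is_set(node, NM_NODE_ATTR_PORT)))
			return -EINVAL;

		node->set_attributes |= 1UL << idx;
		return 0;
	}

	int main(void)
	{
		struct fake_node node = { 0 };

		printf("num first:  %d\n", node_store(&node, NM_NODE_ATTR_NUM));
		printf("address:    %d\n", node_store(&node, NM_NODE_ATTR_ADDRESS));
		printf("port:       %d\n", node_store(&node, NM_NODE_ATTR_PORT));
		printf("num:        %d\n", node_store(&node, NM_NODE_ATTR_NUM));
		printf("num again:  %d\n", node_store(&node, NM_NODE_ATTR_NUM));
		return 0;
	}

This is also why nm_node_attrs[] gained designated initializers and the
nm_attr_index() helper in the diff: the bit recorded for each attribute is
its index in that array, so nm_node_store() can refuse a second write with
-EBUSY and the num/local handlers can refuse out-of-order writes with
-EINVAL.
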

Modified: trunk/fs/ocfs2/cluster/nodemanager.c
===================================================================
--- trunk/fs/ocfs2/cluster/nodemanager.c	2005-03-30 08:34:23 UTC (rev 2079)
+++ trunk/fs/ocfs2/cluster/nodemanager.c	2005-03-30 17:51:17 UTC (rev 2080)
@@ -104,8 +104,8 @@
 
 	read_lock(&nm_single_cluster->cl_nodes_lock);
 	node = nm_single_cluster->cl_nodes[node_num];
-	if (node) /* XXX get a ref */
-		;
+	if (node)
+		uobject_get(&node->nd_uobj);
 	read_unlock(&nm_single_cluster->cl_nodes_lock);
 out:
 	return node;
@@ -170,8 +170,8 @@
 
 	read_lock(&cluster->cl_nodes_lock);
 	node = nm_node_ip_tree_lookup(cluster, addr, NULL, NULL);
-	if (node) /* XXX get a ref */
-		;
+	if (node)
+		uobject_get(&node->nd_uobj);
 	read_unlock(&cluster->cl_nodes_lock);
 
 out:
@@ -181,7 +181,7 @@
 
 void nm_node_put(struct nm_node *node)
 {
-	/* XXX do something */
+	uobject_put(&node->nd_uobj);
 }
 EXPORT_SYMBOL(nm_node_put);
 
@@ -214,8 +214,7 @@
 static void nm_node_release(struct uobject *uobj)
 {
 	struct nm_node *node = to_nm_node(uobj);
-	printk("releasing node %p\n", node);
-	/* FIXME: must free! */
+	kfree(node);
 }
 
 static ssize_t nm_node_num_read(struct nm_node *node, char *page)
@@ -230,6 +229,13 @@
 	return to_nm_cluster(node->nd_uobj.parent->parent);
 }
 
+enum {
+	NM_NODE_ATTR_NUM = 0,
+	NM_NODE_ATTR_PORT,
+	NM_NODE_ATTR_ADDRESS,
+	NM_NODE_ATTR_LOCAL,
+};
+
 static ssize_t nm_node_num_write(struct nm_node *node, const char *page,
 				 size_t count)
 {
@@ -244,6 +250,14 @@
 	if (tmp >= NM_MAX_NODES)
 		return -ERANGE;
 
+	/* once we're in the cl_nodes tree networking can look us up by
+	 * node number and try to use our address and port attributes
+	 * to connect to this node.. make sure that they've been set
+	 * before writing the node attribute? */
+	if (!test_bit(NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
+	    !test_bit(NM_NODE_ATTR_PORT, &node->nd_set_attributes))
+		return -EINVAL; /* XXX */
+
 	write_lock(&cluster->cl_nodes_lock);
 	if (cluster->cl_nodes[tmp])
 		p = NULL;
@@ -288,8 +302,6 @@
 	return sprintf(page, "%u.%u.%u.%u\n", NIPQUAD(node->nd_ipv4_address));
 }
 
-/* XXX this is acting as commit until commit really lands.. all this will be
- * hoisted into the commit method */
 static ssize_t nm_node_ipv4_address_write(struct nm_node *node,
 					  const char *page,
 					  size_t count)
@@ -347,11 +359,18 @@
 
 	tmp = !!tmp; /* boolean of whether this node wants to be local */
 
+	/* setting local turns on networking rx for now so we require having
+	 * set everything else first */
+	if (!test_bit(NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
+	    !test_bit(NM_NODE_ATTR_NUM, &node->nd_set_attributes) ||
+	    !test_bit(NM_NODE_ATTR_PORT, &node->nd_set_attributes))
+		return -EINVAL; /* XXX */
+
 	/* the only failure case is trying to set a new local node
 	 * when a different one is already set */
 	if (tmp && tmp == cluster->cl_has_local &&
 	    cluster->cl_local_node != node->nd_num)
-			return -EBUSY;
+		return -EBUSY;
 
 	/* bring up the rx thread if we're setting the new local
 	 * node.  XXX make sure port/addr are set */
@@ -403,13 +422,24 @@
 	.store	= nm_node_local_write,
 };
 static struct configfs_attribute *nm_node_attrs[] = {
-	&nm_node_attr_num.attr,
-	&nm_node_attr_ipv4_port.attr,
-	&nm_node_attr_ipv4_address.attr,
-	&nm_node_attr_local.attr,
+	[NM_NODE_ATTR_NUM] = &nm_node_attr_num.attr,
+	[NM_NODE_ATTR_PORT] = &nm_node_attr_ipv4_port.attr,
+	[NM_NODE_ATTR_ADDRESS] = &nm_node_attr_ipv4_address.attr,
+	[NM_NODE_ATTR_LOCAL] = &nm_node_attr_local.attr,
 	NULL,
 };
 
+static int nm_attr_index(struct configfs_attribute *attr)
+{
+	int i;
+	for (i = 0; i < ARRAY_SIZE(nm_node_attrs); i++) {
+		if (attr == nm_node_attrs[i])
+			return i;
+	}
+	BUG();
+	return 0;
+}
+
 static ssize_t nm_node_show(struct uobject *uobj,
 			    struct configfs_attribute *attr,
 			    char *page)
@@ -431,10 +461,23 @@
 	struct nm_node *node = to_nm_node(uobj);
 	struct nm_node_attribute *nm_node_attr =
 		container_of(attr, struct nm_node_attribute, attr);
-	ssize_t ret = -EINVAL;
+	ssize_t ret;
+	int attr_index = nm_attr_index(attr);
 
-	if (nm_node_attr->store)
-		ret = nm_node_attr->store(node, page, count);
+	if (nm_node_attr->store == NULL) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (test_bit(attr_index, &node->nd_set_attributes))
+		return -EBUSY;
+
+	ret = nm_node_attr->store(node, page, count);
+	if (ret < count)
+		goto out;
+
+	set_bit(attr_index, &node->nd_set_attributes);
+out:
 	return ret;
 }
 
@@ -470,12 +513,9 @@
 					      const char *name)
 {
 	struct nm_node *node = NULL;
-	struct nm_cluster *cluster = to_nm_cluster(uset->uobj.parent);
 	struct uobject *ret = NULL;
 	net_inode_private *nip;
 
-	printk("trying to make a node object under cluster %p\n", cluster);
-
 	if (strlen(name) > NM_MAX_NAME_LEN)
 		goto out; /* ENAMETOOLONG */
 
@@ -484,7 +524,6 @@
 		goto out; /* ENOMEM */
 
 	strcpy(node->nd_name, name); /* use uobj.name instead? */
-	node->nd_num = NM_MAX_NODES;
 
 	/* this should be somewhere else */
 	nip = &node->nd_net_inode_private;
@@ -516,11 +555,11 @@
 {
 	struct nm_node *node = to_nm_node(uobj);
 	struct nm_cluster *cluster = to_nm_cluster(uset->uobj.parent);
-	int node_not_in_nodes_array = 0;
 
 	net_stop_node_sock(node);
 
-	if (cluster->cl_has_local && cluster->cl_local_node == node->nd_num) {
+	if (cluster->cl_has_local &&
+	    (cluster->cl_local_node == node->nd_num)) {
 		cluster->cl_has_local = 0;
 		cluster->cl_local_node = NM_MAX_NODES;
 		net_stop_rx_thread(node);
@@ -534,19 +573,13 @@
 	if (node->nd_ipv4_address)
 		rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree);
 
-	if (node->nd_num != NM_MAX_NODES) {
-		if (cluster->cl_nodes[node->nd_num] != node)
-			node_not_in_nodes_array = 1;
-		else  {
-			cluster->cl_nodes[node->nd_num] = NULL;
-			node->nd_num = NM_MAX_NODES;
-			clear_bit(node->nd_num, cluster->cl_nodes_bitmap);
-		}
+	/* nd_num might be 0 if the node number hasn't been set.. */
+	if (cluster->cl_nodes[node->nd_num] == node) {
+		cluster->cl_nodes[node->nd_num] = NULL;
+		clear_bit(node->nd_num, cluster->cl_nodes_bitmap);
 	}
 	write_unlock(&cluster->cl_nodes_lock);
 
-	BUG_ON(node_not_in_nodes_array);
-
 	uobject_put(uobj);
 }
 
@@ -566,8 +599,6 @@
 {
 	struct nm_cluster *cluster = to_nm_cluster(uobj);
 
-	printk("releasing cluster %p\n", cluster);
-
 	kfree(cluster->cl_uset.default_sets);
 	kfree(cluster);
 }
@@ -605,8 +636,6 @@
 	struct uset *hb_uset = NULL, *ret = NULL;
 	void *defs = NULL;
 
-	printk("trying to make a cluster object\n");
-
 	/* this runs under the parent dir's i_sem; there can be only
 	 * one caller in here at a time */
 	if (nm_single_cluster)

Modified: trunk/fs/ocfs2/cluster/nodemanager.h
===================================================================
--- trunk/fs/ocfs2/cluster/nodemanager.h	2005-03-30 08:34:23 UTC (rev 2079)
+++ trunk/fs/ocfs2/cluster/nodemanager.h	2005-03-30 17:51:17 UTC (rev 2080)
@@ -85,6 +85,8 @@
 	/* there can be only one local node for now */
 	int			nd_local;
 
+	unsigned long		nd_set_attributes;
+
 	/* we're making simple assertions that a node can only have one network
 	 * identity and report at one place in a heartbeat */
 	net_inode_private	nd_net_inode_private;


