[Ocfs2-commits] khackel commits r1786 - trunk/cluster

svn-commits at oss.oracle.com
Tue Jan 18 14:40:56 CST 2005


Author: khackel
Date: 2005-01-18 14:40:55 -0600 (Tue, 18 Jan 2005)
New Revision: 1786

Modified:
   trunk/cluster/nodemanager.c
Log:
wrap to 80 columns

Modified: trunk/cluster/nodemanager.c
===================================================================
--- trunk/cluster/nodemanager.c	2005-01-18 01:50:02 UTC (rev 1785)
+++ trunk/cluster/nodemanager.c	2005-01-18 20:40:55 UTC (rev 1786)
@@ -106,33 +106,35 @@
  * do disk heartbeating only for those groups with valid UUIDs.)  
  *
  * USAGE:
- * For our purposes, the nm service can be autoloaded by an fstab entry or manually
- * through mount (mount -t nm none /proc/cluster/nm).  Once that is done, an init
- * script (or single executable on an initrd) should be run to create the static
- * cluster info, possibly from a file like /etc/nm.conf or similar.  We should 
- * probably create a "dlm" or "everyone" group (with NO disk heartbeating) so that 
- * the dlm service can be used with the network only.  This group should contain 
- * all known nodes.  After this is done, the net, hb and dlm modules can come up.
- * The nm service is now ready for use, since groups don't need to be created till 
- * later.
+ * For our purposes, the nm service can be autoloaded by an fstab entry or 
+ * manually through mount (mount -t nm none /proc/cluster/nm).  Once that is 
+ * done, an init script (or single executable on an initrd) should be run to 
+ * create the static cluster info, possibly from a file like /etc/nm.conf or 
+ * similar.  We should probably create a "dlm" or "everyone" group (with NO disk
+ * heartbeating) so that the dlm service can be used with the network only.  
+ * This group should contain all known nodes.  After this is done, the net, hb 
+ * and dlm modules can come up. The nm service is now ready for use, since 
+ * groups don't need to be created till later.
  * 
- * A group services daemon can be written (by someone!? ;-) to run at this point.
- * Since the "dlm" group has everything it needs for full dlmming (since it uses 
- * only network), the dlm itself can be used to arbitrate for group creation, 
- * and additions/deletions from groups.  Callbacks should be registered with nm by
- * other services that care on each of these events.  For instance, heartbeat should
- * register a callback with nm for group creation, and addition and deletion from 
- * a group so that it can make any necessary changes to its heartbeating (primarily
- * so that it can begin/end disk heartbeat for any group/node that needs it).
+ * A group services daemon can be written (by someone!? ;-) to run at this 
+ * point. Since the "dlm" group has everything it needs for full dlmming (since
+ * it uses only network), the dlm itself can be used to arbitrate for group 
+ * creation, and additions/deletions from groups.  Callbacks should be 
+ * registered with nm by other services that care about these events.  For
+ * instance, heartbeat should register a callback with nm for group creation, 
+ * and addition and deletion from a group so that it can make any necessary 
+ * changes to its heartbeating (primarily so that it can begin/end disk 
+ * heartbeat for any group/node that needs it).
  *   
  * NOTE NOTE NOTE !!!!:
- * This is intended to be a quickie implementation.  (translation: lame)  I do not
- * want to step on anyone's toes who may have implemented something wayyy better.
- * If something out there "wins", we will plug into that instead.  If nothing really
- * takes off, we at least have a (lame) reference to work off of.  However, since this 
- * implementation exists solely to make ocfs2 work, and one of the major advantages
- * of ocfs version 1 was ease of setup, we don't want to move to something 
- * substantially more complicated than this (one conf file).
+ * This is intended to be a quickie implementation.  (translation: lame)  I do
+ * not want to step on anyone's toes who may have implemented something wayyy 
+ * better.  If something out there "wins", we will plug into that instead.  If 
+ * nothing really takes off, we at least have a (lame) reference to work off of.
+ * However, since this implementation exists solely to make ocfs2 work, and one
+ * of the major advantages of ocfs version 1 was ease of setup, we don't want 
+ * to move to something substantially more complicated than this (one conf 
+ * file).
  *
  */ 
 
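
The handlers rewrapped below implement a transaction-style control interface:
user space writes a request struct stamped with NM_OP_MAGIC and an opcode to a
file under /proc/cluster/nm, then reads back a "status: message" string
(status 0 on success, a negative errno otherwise).  Here is a minimal
user-space sketch of that flow.  Only the magic check, the opcode switch, and
the reply format are visible in this diff; the struct layout, constant values,
and control-file path below are illustrative assumptions, not the real nm ABI.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* hypothetical values -- the real ones live in the nm headers */
    #define NM_OP_MAGIC        0xbeaf
    #define NM_OP_CREATE_GROUP 1
    #define NM_MAX_NAME_LEN    64

    struct nm_op {                      /* assumed request layout */
            unsigned int magic;         /* must be NM_OP_MAGIC */
            unsigned int opcode;        /* which handler to run */
            union {
                    struct {
                            char name[NM_MAX_NAME_LEN + 1];
                    } gc;               /* group-create arguments */
                    unsigned int index; /* node/group number lookups */
            } arg_u;
    };

    int main(void)
    {
            struct nm_op op;
            char reply[256];
            int fd, n, status = -1;

            fd = open("/proc/cluster/nm/cluster", O_RDWR); /* assumed path */
            if (fd < 0)
                    return 1;

            memset(&op, 0, sizeof(op));
            op.magic = NM_OP_MAGIC;
            op.opcode = NM_OP_CREATE_GROUP;
            strcpy(op.arg_u.gc.name, "everyone"); /* the net-only dlm group */
            if (write(fd, &op, sizeof(op)) != sizeof(op)) {
                    close(fd);
                    return 1;
            }

            /* every handler sprintfs a "status: message" reply */
            n = read(fd, reply, sizeof(reply) - 1);
            if (n > 0) {
                    reply[n] = '\0';
                    sscanf(reply, "%d:", &status);
                    printf("nm replied (%d): %s\n", status, reply);
            }
            close(fd);
            return status == 0 ? 0 : 1;
    }
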
@@ -253,7 +255,8 @@
 
 
 
-static struct dentry * nm_add_file(struct super_block *s, struct dentry *parent, struct tree_descr *file, int ino)
+static struct dentry * nm_add_file(struct super_block *s, struct dentry *parent,
+				   struct tree_descr *file, int ino)
 {
 	struct qstr name;
 	struct dentry *dentry = ERR_PTR(-EINVAL);
@@ -296,7 +299,8 @@
 }
 
 
-static struct dentry * nm_add_link(struct super_block *s, struct dentry *parent, struct tree_descr *file, int ino)
+static struct dentry * nm_add_link(struct super_block *s, struct dentry *parent,
+				   struct tree_descr *file, int ino)
 {
 	struct qstr name;
 	struct dentry *dentry = ERR_PTR(-EINVAL);
@@ -376,7 +380,8 @@
 	inode = nm_get_group_by_name(data->arg_u.gc.name);
 	if (inode) {
 		ret = sprintf(buf, "%d: group %u (%s) already exists", -EEXIST, 
-			      nm_get_group_global_index(inode), data->arg_u.gc.name);
+			      nm_get_group_global_index(inode), 
+			      data->arg_u.gc.name);
 		iput(inode);
 		return ret;
 	}
@@ -386,7 +391,8 @@
 		goto leave;
 
 	spin_lock(&cluster.bitmap_lock);
-	group_num = nm_find_next_slot(&(cluster.group_bitmap[0]), 255, group_num);
+	group_num = nm_find_next_slot(&(cluster.group_bitmap[0]), 255, 
+				      group_num);
 	spin_unlock(&cluster.bitmap_lock);
 
 	if (group_num < 0) {
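
nm_find_next_slot() itself is not part of this diff, but given the 255-slot
group_bitmap and the bitmap_lock held around the call, it presumably scans for
a free bit at or after group_num and claims it.  A sketch of that pattern
using stock kernel bitops (the name and exact semantics here are assumptions,
not the real implementation):

    #include <linux/bitops.h>
    #include <linux/errno.h>

    /* find a clear bit at or after 'start', claim it, and return its
     * index; the caller already holds the bitmap spinlock */
    static int find_and_set_slot(unsigned long *bitmap, int max, int start)
    {
            int slot = find_next_zero_bit(bitmap, max, start);

            if (slot >= max)
                    return -ENOSPC;
            set_bit(slot, bitmap);
            return slot;
    }
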
@@ -519,11 +525,13 @@
 	bucket = hash_long(n->node.ifaces[0].addr_u.ip_addr4, NM_HASH_BITS);
 	list_add_tail(&n->ip_hash, &nm_ip_hash[bucket]);
 	spin_unlock(&nm_ip_hash_lock);
-	nmprintk("hashed ip %d.%d.%d.%d to bucket %d\n", NIPQUAD(n->node.ifaces[0].addr_u.ip_addr4), bucket);
+	nmprintk("hashed ip %d.%d.%d.%d to bucket %d\n", 
+		 NIPQUAD(n->node.ifaces[0].addr_u.ip_addr4), bucket);
 	n->inode = inode;
 	inode->u.generic_ip = n;
 
-	ret = sprintf(buf, "0: node %u (%s) added", node_num, n->node.node_name);
+	ret = sprintf(buf, "0: node %u (%s) added", node_num, 
+		      n->node.node_name);
 	nm_do_callbacks(NM_NODE_ADD_CB, inode, NULL, node_num);
 
 leave:
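
The nm_do_callbacks(NM_NODE_ADD_CB, ...) call above is the firing side of the
callback scheme described in the header comment.  The registration side is not
in this diff; here is a sketch of how a consumer such as heartbeat might hook
in, assuming a callback signature shaped like the arguments passed at fire
time (the registration function and its prototype are hypothetical):

    #include <linux/fs.h>
    #include <linux/init.h>

    /* assumed shape, matching nm_do_callbacks(type, inode, NULL, num);
     * NM_NODE_ADD_CB itself comes from the nodemanager header */
    typedef void (nm_cb_func)(struct inode *group, struct inode *aux,
                              int node_num);

    /* hypothetical registration entry point exported by nodemanager */
    extern int nm_register_callback(int type, nm_cb_func *func);

    static void hb_node_add_cb(struct inode *group, struct inode *aux,
                               int node_num)
    {
            /* begin disk heartbeat for node_num if the group it just
             * joined has a valid disk UUID */
    }

    static int __init hb_hook_into_nm(void)
    {
            return nm_register_callback(NM_NODE_ADD_CB, hb_node_add_cb);
    }
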
@@ -588,7 +596,8 @@
 		goto leave;
 
 	if (g->state == NM_GROUP_NOT_READY) {
-		ret = sprintf(buf, "%d: group disk has not been discovered.  cannot add nodes.", -EROFS);
+		ret = sprintf(buf, "%d: group disk has not been discovered.  "
+			      "cannot add nodes.", -EROFS);
 		goto leave;
 	}
 
@@ -689,7 +698,8 @@
 	nmprintk("name cluster...\n");
 	spin_lock(&nm_lock);
 	if (cluster.state == NM_CLUSTER_UP) {
-		ret = sprintf(buf, "%d: cluster name could not be set.  cluster already up.", -EINVAL);
+		ret = sprintf(buf, "%d: cluster name could not be set.  "
+			      "cluster already up.", -EINVAL);
 		goto leave;
 	}
 	memset(cluster.name, 0, NM_MAX_NAME_LEN+1);
@@ -817,7 +827,8 @@
 	node_num = data->arg_u.index;
 	inode = __nm_get_node_by_num(node_num);
 	if (inode) {
-		dentry = list_entry(inode->i_dentry.next, struct dentry, d_alias);
+		dentry = list_entry(inode->i_dentry.next, struct dentry, 
+				    d_alias);
 		priv = inode->u.generic_ip;
 		ret = sprintf(buf, "0: global_index=%u\n"
 			           "name=%*s\n",
@@ -827,7 +838,8 @@
 		for (i=0; i<NM_MAX_IFACES; i++) {
 			n = &priv->node.ifaces[i];
 			vers = ntohs(n->ip_version);
-			nmprintk("ip_version=%u, vers=%u\n", n->ip_version, vers);
+			nmprintk("ip_version=%u, vers=%u\n", 
+				 n->ip_version, vers);
 			if (vers!=4 && vers!=6)
 				continue;
 			/* TODO: how to print ipv6? */
@@ -857,7 +869,8 @@
 	inode = __nm_get_group_by_num(group_num);
 	if (inode) {
 		g = inode->u.generic_ip;
-		dentry = list_entry(inode->i_dentry.next, struct dentry, d_alias);
+		dentry = list_entry(inode->i_dentry.next, struct dentry, 
+				    d_alias);
 		ret = sprintf(buf, "0: group_num=%u\n"
 		        	   "name=%*s\n"
 				   "disk_uuid=%s\n",
@@ -921,7 +934,8 @@
 			ret = sprintf(buf, "%d: %u", ret, me);
 			break;
 		default:
-			ret = sprintf(buf, "%d: bad opcode: %u", -EINVAL, data->opcode);
+			ret = sprintf(buf, "%d: bad opcode: %u", -EINVAL, 
+				      data->opcode);
 			break;
 	}
 	nmprintk("leaving!\n");
@@ -946,7 +960,8 @@
 			ret = nm_get_node_info(buf, data);
 			break;
 		default:
-			ret = sprintf(buf, "%d: bad opcode: %u", -EINVAL, data->opcode);
+			ret = sprintf(buf, "%d: bad opcode: %u", -EINVAL, 
+				      data->opcode);
 			break;
 	}
 	nmprintk("leaving!\n");
@@ -966,7 +981,8 @@
 	if (data->magic != NM_OP_MAGIC)
 		return -EINVAL;
 
-	nmprintk("opcode is %u, add_group is %u\n", data->opcode, NM_OP_ADD_GROUP_NODE);
+	nmprintk("opcode is %u, add_group is %u\n", data->opcode, 
+		 NM_OP_ADD_GROUP_NODE);
 	switch (data->opcode) {
 		case NM_OP_GET_GROUP_INFO:
 			ret = nm_get_group_info(buf, data);
@@ -977,7 +993,8 @@
 			break;
 
 		default:
-			ret = sprintf(buf, "%d: bad opcode: %u", -EINVAL, data->opcode);
+			ret = sprintf(buf, "%d: bad opcode: %u", 
+				      -EINVAL, data->opcode);
 			break;
 	}
 	nmprintk("leaving!\n");
@@ -1100,7 +1117,8 @@
 	NM_ASSERT(single_sb);
 	NM_ASSERT(single_sb->s_root);
 
-	dentry = lookup_one_len(node_name, single_sb->s_root, strlen(node_name));
+	dentry = lookup_one_len(node_name, single_sb->s_root, 
+				strlen(node_name));
 	if (!IS_ERR(dentry)) {
 		inode = dentry->d_inode;
 		if (inode) {
@@ -1148,7 +1166,8 @@
 EXPORT_SYMBOL(nm_this_node);
 
 /* slow */
-static u16 nm_get_group_index(struct inode *group, struct inode *inode, struct dentry **child)
+static u16 nm_get_group_index(struct inode *group, struct inode *inode, 
+			      struct dentry **child)
 {
 	struct dentry *tmp = NULL, *parent = NULL;
 	u16 slot_num = NM_MAX_NODES;
@@ -1293,7 +1312,8 @@
 	};
 	
 	sz = sizeof(nm_files) / sizeof(struct tree_descr);
-	ops = kmalloc(sizeof(TA_write_ops) + (sz * sizeof(TA_write_op *)), GFP_KERNEL);
+	ops = kmalloc(sizeof(TA_write_ops) + (sz * sizeof(TA_write_op *)), 
+		      GFP_KERNEL);
 	if (!ops)
 		return -ENOMEM;
 
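
The kmalloc in this last hunk is the usual single-allocation idiom for a
header followed by a variable-length array: sizeof the fixed TA_write_ops part
plus sz trailing TA_write_op pointers.  The TA_* types belong to the
transaction-file code and are not shown in this diff, so this standalone
sketch just assumes an equivalent shape:

    #include <linux/slab.h>

    struct write_ops {
            int num_ops;
            int (*op[0])(char *buf);  /* flexible tail of handlers */
    };

    static struct write_ops *alloc_write_ops(int n)
    {
            struct write_ops *ops;

            /* one allocation: header plus n trailing function pointers */
            ops = kmalloc(sizeof(*ops) + n * sizeof(ops->op[0]), GFP_KERNEL);
            if (ops)
                    ops->num_ops = n;
            return ops;
    }
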


