[Ocfs2-commits] khackel commits r1702 - in trunk: cluster src

svn-commits at oss.oracle.com
Mon Dec 13 23:49:18 CST 2004


Author: khackel
Date: 2004-12-13 23:49:17 -0600 (Mon, 13 Dec 2004)
New Revision: 1702

Modified:
   trunk/cluster/Makefile
   trunk/cluster/compat_libfs.c
   trunk/cluster/compat_libfs.h
   trunk/cluster/dlmcommon.h
   trunk/cluster/dlmmaster.c
   trunk/cluster/dlmmod.c
   trunk/cluster/dlmmod.h
   trunk/cluster/dlmrecovery.c
   trunk/cluster/dlmthread.c
   trunk/cluster/heartbeat.c
   trunk/cluster/nodemanager.c
   trunk/cluster/tcp.c
   trunk/cluster/tcp.h
   trunk/cluster/warning_hack.h
   trunk/src/dlmglue.c
   trunk/src/vote.c
Log:
* fixed bogus assert (DLM_ASSERT on lksb->status) in the dlm thread runner
* fixed bug in the dlm thread runner that chose the wrong highest blocked level for basts: it recorded the holder's own mode instead of the requested mode (see the sketch after the dlmthread.c hunks)
* made logging more consistent across all the modules by routing it through per-module printk wrappers (sketch right after this list)
* applied zab's patch for making things work on 2.6
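
The logging change boils down to per-module printk wrappers: dlmprintk/dlmprintk0
pick up a "(dlm:pid)" prefix in dlmmod.h, and heartbeat.c/nodemanager.c gain
hbprintk/nmprintk equivalents. A minimal sketch of the pattern, using a made-up
module tag "foo" and a hypothetical FOO_QUIET switch (the prefix format follows
the dlmprintk definition in this patch):

#include <linux/kernel.h>	/* printk */
#include <linux/sched.h>	/* current->pid */

#ifdef FOO_QUIET
#define fooprintk(x, arg...)
#define fooprintk0(x)
#else
#define fooprintk(x, arg...)	printk("(foo:%d)(%s:%d) " x, current->pid, \
					__FUNCTION__, __LINE__, ##arg)
#define fooprintk0(x)		printk("(foo:%d)(%s:%d) " x, current->pid, \
					__FUNCTION__, __LINE__)
#endif

/* usage, mirroring the conversions below:       */
/*   fooprintk("node %u is the master\n", node); */
/*   fooprintk0("no more nodes\n");              */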



Modified: trunk/cluster/Makefile
===================================================================
--- trunk/cluster/Makefile	2004-12-09 23:19:25 UTC (rev 1701)
+++ trunk/cluster/Makefile	2004-12-14 05:49:17 UTC (rev 1702)
@@ -170,13 +170,13 @@
 
 INSTALL_MODULE = ocfs2.ko
 
-#ALL_RULES = stamp-md5 build-ocfs
+#ALL_RULES = stamp-md5 build-cluster
 ALL_RULES = build-cluster
 
-build-ocfs:
+build-cluster:
 	$(MAKE) -C $(KERNELDIR) M=$(CURDIR) modules
 
-clean-ocfs:
+clean-cluster:
 	$(MAKE) -C $(KERNELDIR) M=$(CURDIR) clean
 
 endif # OCFS_KERNEL_2_6
@@ -210,8 +210,8 @@
 # various parameters for the kernel make system and then it will take care of
 # building us.
 
-STAMP_DIR = $(OCFS_SRC_DIR)
-include $(OCFS_SRC_DIR)/../Versioning.make
+STAMP_DIR = $(CLUSTER_SRC_DIR)
+include $(CLUSTER_SRC_DIR)/../Versioning.make
 
 EXTRA_CFLAGS += $(GLOBAL_DEFINES)
 

Modified: trunk/cluster/compat_libfs.c
===================================================================
--- trunk/cluster/compat_libfs.c	2004-12-09 23:19:25 UTC (rev 1701)
+++ trunk/cluster/compat_libfs.c	2004-12-14 05:49:17 UTC (rev 1702)
@@ -18,11 +18,14 @@
 
 #include "compat_libfs.h"
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
 #define kstatfs statfs
+#endif
+
 #define __user
 
 
-int simple_statfs(struct super_block *sb, struct statfs *buf);
+int simple_statfs(struct super_block *sb, struct kstatfs *buf);
 
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
 struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd);

Modified: trunk/cluster/compat_libfs.h
===================================================================
--- trunk/cluster/compat_libfs.h	2004-12-09 23:19:25 UTC (rev 1701)
+++ trunk/cluster/compat_libfs.h	2004-12-14 05:49:17 UTC (rev 1702)
@@ -1,6 +1,8 @@
 #ifndef CLUSTER_COMPAT_LIBFS_H
 #define CLUSTER_COMPAT_LIBFS_H
 
+#include <linux/version.h>
+
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
 #define TA_GENERIC_SB_MEMBER(sb)  ((sb)->s_fs_info)
 #else
@@ -23,12 +25,14 @@
 	TA_write_op *write_op[0];
 } TA_write_ops;
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
 struct tree_descr 
 { 
 	char *name; 
 	struct file_operations *ops; 
 	int mode; 
 };
+#endif
 
 int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files);
 struct dentry * simple_find_child(struct dentry *dentry, struct qstr *name);
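
The compat_libfs hunks above share one pattern: anything 2.6 already provides
(struct kstatfs, struct tree_descr) stays defined only for older kernels, behind
a LINUX_VERSION_CODE guard. Roughly, the kstatfs half of that looks like this on
its own (the simple_statfs prototype is the one from compat_libfs.c):

#include <linux/version.h>
#include <linux/fs.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
/* 2.4 has no struct kstatfs; alias the old name so one prototype
 * compiles against either kernel */
#define kstatfs statfs
#endif

int simple_statfs(struct super_block *sb, struct kstatfs *buf);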

Modified: trunk/cluster/dlmcommon.h
===================================================================
--- trunk/cluster/dlmcommon.h	2004-12-09 23:19:25 UTC (rev 1701)
+++ trunk/cluster/dlmcommon.h	2004-12-14 05:49:17 UTC (rev 1702)
@@ -41,7 +41,7 @@
 	// uuid of disk
 	char uuid[CLUSTER_DISK_UUID_LEN+1];
 	// all the rest are for heartbeat
-	kdev_t dev;
+	dev_t dev;
 	u32 blocksize_bits;
 	u32 num_blocks;
 	u64 start_block;

Modified: trunk/cluster/dlmmaster.c
===================================================================
--- trunk/cluster/dlmmaster.c	2004-12-09 23:19:25 UTC (rev 1701)
+++ trunk/cluster/dlmmaster.c	2004-12-14 05:49:17 UTC (rev 1702)
@@ -194,7 +194,7 @@
 	mle = kmalloc(sizeof(dlm_master_list_entry), GFP_KERNEL);
 	res = kmalloc(sizeof(dlm_lock_resource), GFP_KERNEL);
 	if (!mle || !res) {
-		printk("could not allocate memory for new lock resource!\n");
+		dlmprintk0("could not allocate memory for new lock resource!\n");
 		if (mle)
 			kfree(mle);
 		if (res)
@@ -244,7 +244,7 @@
 			continue;
 
 		if (tmpmle->type == DLM_MLE_MASTER) {
-			printk("impossible!  master entry for nonexistent lock!\n");
+			dlmprintk0("impossible!  master entry for nonexistent lock!\n");
 			BUG();
 		}
 		dlm_get_mle(tmpmle);
@@ -256,7 +256,7 @@
 	if (!blocked) {
 		/* go ahead and try to master lock on this node */
 		if (dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 1)) {
-			printk("bug! failed to register hb callbacks\n");
+			dlmprintk0("bug! failed to register hb callbacks\n");
 			BUG();
 		}
 		list_add(&mle->list, &dlm_master_list);
@@ -288,14 +288,14 @@
 	while (1) {
 		bit = find_next_bit (mle->vote_map, NM_MAX_NODES, start);
 		if (bit >= NM_MAX_NODES) {
-			printk("no more nodes\n");
+			dlmprintk0("no more nodes\n");
 			break;
 		}
 		
 		ret = dlm_do_master_request(mle, bit);
 		if (ret < 0) {
 			// TODO
-			//printk("dlm_do_master_request returned %d!\n", ret);
+			//dlmprintk("dlm_do_master_request returned %d!\n", ret);
 		}
 		if (mle->master != NM_MAX_NODES) {
 			// found a master!
@@ -317,7 +317,7 @@
 		spin_lock(&mle->spinlock);
 		if (mle->master != NM_MAX_NODES) {
 			u16 m = mle->master;
-			// printk("node %u is the master!\n", m);
+			// dlmprintk("node %u is the master!\n", m);
 			spin_unlock(&mle->spinlock);
 
 			spin_lock(&res->spinlock);
@@ -328,14 +328,14 @@
 		restart = 0;
 		map_changed = (memcmp(mle->vote_map, mle->node_map, sizeof(mle->vote_map)) != 0);
 		if (memcmp(mle->vote_map, mle->response_map, sizeof(mle->vote_map)) == 0) {
-			// printk("every node has responded...\n");
+			// dlmprintk("every node has responded...\n");
 			if (map_changed) {
-				printk("eek! got all original nodes, but nodemap changed while collecting responses\n");
+				dlmprintk0("eek! got all original nodes, but nodemap changed while collecting responses\n");
 				restart = 1;
 			}
 
 			if (mle->error) {
-				printk("ugh.  some node hit an error (-ENOMEM).  try the whole thing again\n"); 
+				dlmprintk0("ugh.  some node hit an error (-ENOMEM).  try the whole thing again\n"); 
 				mle->error = 0;
 				/* TODO: treat this just like the dead node case below,
 				 * cleanup and start over, but keep the error node around */
@@ -348,42 +348,42 @@
 				/* they should have put a dummy entry on dlm_master_list */
 				/* need to assert myself as the master */
 				
-				// printk("I am the only node in-progress!  asserting myself as master\n");
+				// dlmprintk0("I am the only node in-progress!  asserting myself as master\n");
 				assert = 1;
 			} else {
 				/* other nodes are in-progress */
 				if (map_changed && !test_bit(bit, mle->node_map)) {
 					/* TODO: need to copy the node_map into the vote_map, zero 
 					 * everything out and start over */
-					printk("need to handle this case!  winning node %u just died!\n", bit);
+					dlmprintk("need to handle this case!  winning node %u just died!\n", bit);
 					restart = 1;
 				}
 
 				if (bit > dlm->group_index) {
-					// printk("next in-progress node (%u) is higher than me (%u)\n",
+					// dlmprintk("next in-progress node (%u) is higher than me (%u)\n",
 					//        bit, dlm->group_index);
 
 					/* nodes not in-progress should be locking out this lockid until I assert */
 					/* in-progress nodes should match me up with their lowest maybe_map bit */
 					/* need to assert myself as the master */
 
-					// printk("I am the lowest node!  asserting myself as master\n");
+					// dlmprintk("I am the lowest node!  asserting myself as master\n");
 					assert = 1;
 				} else {
 					/* need to sit around and wait for assert */
 					/* my lowest maybe_map bit should be the one to assert */
 					/* just fall through and sleep. should be woken by the handler */
 
-					// printk("sleeping while waiting for %u to assert himself as master\n", bit);
+					// dlmprintk("sleeping while waiting for %u to assert himself as master\n", bit);
 				}
 			}
 		} else {
 			if (map_changed) {
 				/* TODO: need to handle this */
-				printk("eek! nodemap changed while collecting responses\n");
+				dlmprintk0("eek! nodemap changed while collecting responses\n");
 				restart = 1;
 			}
-			// printk("still waiting for all nodes to respond...\n");
+			// dlmprintk0("still waiting for all nodes to respond...\n");
 		}
 
 		if (restart && assert)
@@ -397,18 +397,18 @@
 		
 		if (assert) {
 			ret = dlm_do_assert_master(mle);
-			// printk("assert returned %d!\n", ret);
+			// dlmprintk("assert returned %d!\n", ret);
 			if (ret == 0) {
 				spin_lock(&res->spinlock);
 				res->owner = dlm->group_index;
 				spin_unlock(&res->spinlock);
-				// printk("wooo!  i am the owner.  phew!\n");
+				// dlmprintk("wooo!  i am the owner.  phew!\n");
 				break;
 			} else 
 				restart = 1;
 		}
 		if (restart) {
-			printk("something happened such that the master process needs to be restarted!\n");
+			dlmprintk0("something happened such that the master process needs to be restarted!\n");
 			/* TODO: clear it all out and start over */
 		}
 
@@ -460,14 +460,14 @@
 		spin_lock(&res->spinlock);
 		if (res->owner == dlm->group_index) {
 			spin_unlock(&res->spinlock);
-			// printk("this node is the master\n");
+			// dlmprintk0("this node is the master\n");
 			response = DLM_MASTER_RESP_YES;
 			if (mle)
 				kfree(mle);
 			goto send_response;
 		} else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
 			spin_unlock(&res->spinlock);
-			// printk("node %u is the master\n", res->owner);
+			// dlmprintk("node %u is the master\n", res->owner);
 			response = DLM_MASTER_RESP_NO;
 			if (mle)
 				kfree(mle);
@@ -478,11 +478,11 @@
 		 * being blocked, or it is actively trying to
 		 * master this lock. */
 		if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
-			printk("bug! lock with no owner should be in-progress!\n");
+			dlmprintk0("bug! lock with no owner should be in-progress!\n");
 			BUG();
 		}
 
-		// printk("lockres is in progress...\n");
+		// dlmprintk0("lockres is in progress...\n");
 		found = 0;
 		spin_lock(&dlm_master_lock);
 		list_for_each(iter, &dlm_master_list) {
@@ -493,10 +493,10 @@
 			dlm_get_mle(tmpmle);
 			spin_lock(&tmpmle->spinlock);
 			if (tmpmle->type == DLM_MLE_BLOCK) {
-				// printk("this node is waiting for lockres to be mastered\n");
+				// dlmprintk0("this node is waiting for lockres to be mastered\n");
 				response = DLM_MASTER_RESP_NO;
 			} else {
-				// printk("this node is attempting to master lockres\n");
+				// dlmprintk0("this node is attempting to master lockres\n");
 				response = DLM_MASTER_RESP_MAYBE;
 			}
 			set_bit(request->node_idx, tmpmle->maybe_map);
@@ -512,7 +512,7 @@
 		}
 		spin_unlock(&dlm_master_lock);
 		spin_unlock(&res->spinlock);
-		printk("bug bug bug!!!  no mle found for this lock!\n");
+		dlmprintk0("bug bug bug!!!  no mle found for this lock!\n");
 		BUG();
 	}
 	
@@ -535,7 +535,7 @@
 
 	if (!found) {
 		/* this lockid has never been seen on this node yet */
-		// printk("no mle found\n");
+		// dlmprintk0("no mle found\n");
 		if (!mle) {
 			spin_unlock(&dlm_master_lock);
 			spin_unlock(&dlm->spinlock);
@@ -547,7 +547,7 @@
 				goto send_response;
 			}
 			if (dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, &lockname, 0)) {
-				printk("eeek!\n");
+				dlmprintk0("eeek!\n");
 				response = DLM_MASTER_RESP_ERROR;
 				dlm_put_mle(mle);
 				goto send_response;
@@ -555,12 +555,12 @@
 			goto way_up_top;
 		}
 
-		// printk("this is second time thru, already allocated, add the block.\n");
+		// dlmprintk0("this is second time thru, already allocated, add the block.\n");
 		set_bit(request->node_idx, mle->maybe_map);
 		list_add(&mle->list, &dlm_master_list);
 		response = DLM_MASTER_RESP_NO;
 	} else {
-		// printk("mle was found\n");
+		// dlmprintk0("mle was found\n");
 		spin_lock(&tmpmle->spinlock);
 		if (tmpmle->type == DLM_MLE_BLOCK)
 			response = DLM_MASTER_RESP_NO;
@@ -575,9 +575,9 @@
 
 send_response:
 	//ret = dlm_do_master_request_resp(dlm, &lockname, response, request->node_idx);
-	//printk("response returned %d\n", ret);
+	//dlmprintk("response returned %d\n", ret);
 	
-	// printk("sending response %d to other node\n", response);
+	// dlmprintk("sending response %d to other node\n", response);
 	return response;
 }
 
@@ -612,7 +612,7 @@
 
 		dlm_get_mle(mle);
 		if (mle->type == DLM_MLE_BLOCK) {
-			printk("eek! cannot get a response for a block!\n");
+			dlmprintk0("eek! cannot get a response for a block!\n");
 			break;
 		}
 		found = 1;
@@ -621,30 +621,30 @@
 		switch (resp->response) {
 			case DLM_MASTER_RESP_YES:
 				set_bit(resp->node_idx, mle->response_map);
-				// printk("woot!  node %u is the master!\n", resp->node_idx);
+				// dlmprintk("woot!  node %u is the master!\n", resp->node_idx);
 				mle->master = resp->node_idx;
 				wake = 1;
 				break;
 			case DLM_MASTER_RESP_NO:
-				// printk("node %u is not the master, not in-progress\n", resp->node_idx);
+				// dlmprintk("node %u is not the master, not in-progress\n", resp->node_idx);
 				set_bit(resp->node_idx, mle->response_map);
 				if (memcmp(mle->response_map, mle->vote_map, sizeof(mle->vote_map))==0)
 					wake = 1;
 				break;
 			case DLM_MASTER_RESP_MAYBE:
-				// printk("node %u is not the master, but IS in-progress\n", resp->node_idx);
+				// dlmprintk("node %u is not the master, but IS in-progress\n", resp->node_idx);
 				set_bit(resp->node_idx, mle->response_map);
 				set_bit(resp->node_idx, mle->maybe_map);
 				if (memcmp(mle->response_map, mle->vote_map, sizeof(mle->vote_map))==0)
 					wake = 1;
 				break;
 			case DLM_MASTER_RESP_ERROR:
-				printk("node %u hit an -ENOMEM!  try this whole thing again\n", resp->node_idx);
+				dlmprintk("node %u hit an -ENOMEM!  try this whole thing again\n", resp->node_idx);
 				mle->error = 1;
 				wake = 1;
 				break;
 			default:
-				printk("bad response! %u\n", resp->response);
+				dlmprintk("bad response! %u\n", resp->response);
 				break;
 		}
 		if (wake) {		
@@ -659,7 +659,7 @@
 	if (found)
 		dlm_put_mle(mle);
 	else
-		printk("hrrm... got a master resp but found no matching request\n");
+		dlmprintk0("hrrm... got a master resp but found no matching request\n");
 	return 0;
 }
 
@@ -697,18 +697,18 @@
 		mle = NULL;
 	}
 	if (!mle) {
-		printk("EEEEEEK!  just got an assert_master from %u, but no MLE for it!\n",
+		dlmprintk("EEEEEEK!  just got an assert_master from %u, but no MLE for it!\n",
 		       assert->node_idx);
 		spin_unlock(&dlm_master_lock);
 		goto check_lockres;
 	}
 	if ((bit = find_next_bit (mle->maybe_map, NM_MAX_NODES, 0)) >= NM_MAX_NODES) {
-		printk("EEK! no bits set in the maybe_map, but %u is asserting!\n",
+		dlmprintk("EEK! no bits set in the maybe_map, but %u is asserting!\n",
 		       assert->node_idx);
 		BUG();
 	} else if (bit != assert->node_idx) {
 		/* TODO: is this ok?  */
-		printk("EEK! expected %u to be the master, but %u is asserting!\n", 
+		dlmprintk("EEK! expected %u to be the master, but %u is asserting!\n", 
 		       bit, assert->node_idx);
 		BUG();
 	}
@@ -722,19 +722,19 @@
 		spin_lock(&res->spinlock);
 		if (!mle) {
 			if (res->owner != assert->node_idx) {
-				printk("EEEEeeEEeeEEEK!  assert_master from %u, but current owner is %u!\n",
+				dlmprintk("EEEEeeEEeeEEEK!  assert_master from %u, but current owner is %u!\n",
 				       assert->node_idx, res->owner);
 				BUG();
 			}
 		} else {
 			if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
-				printk("EEEEEEEEEEEEEEEEEK!!! got assert_master from node %u, but %u is the owner!\n",
+				dlmprintk("EEEEEEEEEEEEEEEEEK!!! got assert_master from node %u, but %u is the owner!\n",
 			       		assert->node_idx, res->owner);
-				printk("goodnite!\n");
+				dlmprintk0("goodnite!\n");
 				BUG();
 			}
 			if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
-				printk("bug! got assert from %u, but lock with no owner should be in-progress!\n",
+				dlmprintk("bug! got assert from %u, but lock with no owner should be in-progress!\n",
 			       		assert->node_idx);
 				BUG();
 			}
@@ -743,7 +743,7 @@
 	}
 	spin_unlock(&dlm->spinlock);
 
-	// printk("woo!  got an assert_master from node %u!\n", assert->node_idx);
+	// dlmprintk("woo!  got an assert_master from node %u!\n", assert->node_idx);
 	if (mle) {
 		spin_lock(&mle->spinlock);
 		mle->master = assert->node_idx;
@@ -785,33 +785,33 @@
 			switch (response) {
 				case DLM_MASTER_RESP_YES:
 					set_bit(to, mle->response_map);
-					// printk("woot!  node %u is the master!\n", to);
+					// dlmprintk("woot!  node %u is the master!\n", to);
 					mle->master = to;
 					break;
 				case DLM_MASTER_RESP_NO:
-					// printk("node %u is not the master, not in-progress\n", to);
+					// dlmprintk("node %u is not the master, not in-progress\n", to);
 					set_bit(to, mle->response_map);
 					break;
 				case DLM_MASTER_RESP_MAYBE:
-					// printk("node %u is not the master, but IS in-progress\n", to);
+					// dlmprintk("node %u is not the master, but IS in-progress\n", to);
 					set_bit(to, mle->response_map);
 					set_bit(to, mle->maybe_map);
 					break;
 				case DLM_MASTER_RESP_ERROR:
-					printk("node %u hit an -ENOMEM!  try this whole thing again\n", to);
+					dlmprintk("node %u hit an -ENOMEM!  try this whole thing again\n", to);
 					mle->error = 1;
 					break;
 				default:
-					printk("bad response! %u\n", response);
+					dlmprintk("bad response! %u\n", response);
 					ret = -EINVAL;
 					break;
 			}
 			spin_unlock(&mle->spinlock);
 		} else {
-			printk("net_send_message returned %d!\n", ret);
+			dlmprintk("net_send_message returned %d!\n", ret);
 		}
 	} else {
-		printk("nm_get_group_node_by_index failed to find inode for node %d!\n", to);
+		dlmprintk("nm_get_group_node_by_index failed to find inode for node %d!\n", to);
 	}	
 	return ret;
 }
@@ -851,10 +851,10 @@
 	while (1) {
 		to = find_next_bit (mle->vote_map, NM_MAX_NODES, start);
 		if (to >= NM_MAX_NODES) {
-			// printk("no more nodes\n");
+			// dlmprintk0("no more nodes\n");
 			break;
 		}
-		// printk("sending assert master to %d\n", to);
+		// dlmprintk("sending assert master to %d\n", to);
 
 		memset(&assert, 0, sizeof(assert));
 		assert.node_idx = dlm->group_index;
@@ -869,7 +869,7 @@
 		inode = nm_get_group_node_by_index(dlm->group, to);
 		if (!inode) {
 			tmpret = -EINVAL;
-			printk("could not get nm info for node %d!  need to retry this whole thing\n", to);
+			dlmprintk("could not get nm info for node %d!  need to retry this whole thing\n", to);
 			ret = tmpret;
 			break;
 		}
@@ -878,7 +878,7 @@
 
 		if (tmpret < 0) {
 			// TODO
-			// printk("assert_master returned %d!\n", tmpret);
+			// dlmprintk("assert_master returned %d!\n", tmpret);
 			ret = tmpret;
 			break;
 		}
@@ -903,11 +903,11 @@
 
 	mle = data;
 	if (!mle) {
-		printk("eek! NULL mle!\n");
+		dlmprintk0("eek! NULL mle!\n");
 		return;
 	}
 	if (!mle->dlm) {
-		printk("eek! NULL dlm\n");
+		dlmprintk0("eek! NULL dlm\n");
 		return;
 	}
        	dlm = mle->dlm;
@@ -917,13 +917,13 @@
 	spin_lock(&mle->spinlock);
 
 	if (!test_bit(idx, mle->node_map))
-		printk("node %u already removed from nodemap!\n", idx);
+		dlmprintk("node %u already removed from nodemap!\n", idx);
 	else
 		clear_bit(idx, mle->node_map);
 
 #if 0	
 	if (test_bit(idx, mle->recovery_map))
-		printk("node %u already added to recovery map!\n", idx);
+		dlmprintk("node %u already added to recovery map!\n", idx);
 	else
 		set_bit(idx, mle->recovery_map);
 #endif
@@ -938,11 +938,11 @@
 
 	mle = data;
 	if (!mle) {
-		printk("eek! NULL mle!\n");
+		dlmprintk0("eek! NULL mle!\n");
 		return;
 	}
 	if (!mle->dlm) {
-		printk("eek! NULL dlm\n");
+		dlmprintk0("eek! NULL dlm\n");
 		return;
 	}
        	dlm = mle->dlm;
@@ -953,12 +953,12 @@
 
 #if 0	
 	if (test_bit(idx, mle->recovery_map))
-		printk("BUG!!! node up message on node in recovery (%u)!!!\n", idx);
+		dlmprintk("BUG!!! node up message on node in recovery (%u)!!!\n", idx);
 	else 
 #endif
 	{
 		if (test_bit(idx, mle->node_map))
-			printk("node %u already in node map!!!\n", idx);
+			dlmprintk("node %u already in node map!!!\n", idx);
 		else 
 			set_bit(idx, mle->node_map);
 	}

Modified: trunk/cluster/dlmmod.c
===================================================================
--- trunk/cluster/dlmmod.c	2004-12-09 23:19:25 UTC (rev 1701)
+++ trunk/cluster/dlmmod.c	2004-12-14 05:49:17 UTC (rev 1702)
@@ -157,7 +157,7 @@
 	int status;
 
 
-	printk("Loaded dlm Driver module\n");
+	dlmprintk0("Loaded dlm Driver module\n");
 	status = dlm_read_params();
 	if (status < 0)
 		return -1;
@@ -188,7 +188,7 @@
  */
 static void __exit dlm_driver_exit (void)
 {
-	printk("Unloaded dlm Driver module\n");
+	dlmprintk0("Unloaded dlm Driver module\n");
 	return;
 }				/* dlm_driver_exit */
 
@@ -238,9 +238,9 @@
 		if (lock->lksb != lksb || lock->ast != ast ||
 		    lock->bast != bast || lock->astdata != data) {
 			status = DLM_BADARGS;
-			printk("ERROR new args:  lksb=%p, ast=%p, bast=%p, astdata=%p\n", 
+			dlmprintk("ERROR new args:  lksb=%p, ast=%p, bast=%p, astdata=%p\n", 
 			       lksb, ast, bast, data);
-			printk("      orig args: lksb=%p, ast=%p, bast=%p, astdata=%p\n", 
+			dlmprintk("      orig args: lksb=%p, ast=%p, bast=%p, astdata=%p\n", 
 			       lock->lksb, lock->ast, lock->bast, lock->astdata);
 			goto error_status;
 		}
@@ -249,14 +249,14 @@
 		down_read(&dlm->recovery_sem);
 		spin_lock(&res->spinlock);
 		if (flags & LKM_LOCAL) {
-			printk("strange LOCAL convert request!\n");
+			dlmprintk0("strange LOCAL convert request!\n");
 			if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
 				spin_unlock(&res->spinlock);
 				status = DLM_BADPARAM;
 				goto up_error;
 			}
 			res->owner = dlm->group_index;
-			printk("set owner to this node.  you SURE thats what you want!?\n");
+			dlmprintk0("set owner to this node.  you SURE thats what you want!?\n");
 		}
 		status = do_dlmconvert(dlm, res, lock, flags, mode);
 	} else {
@@ -289,7 +289,7 @@
 	rdtsc(u1.hilo[0], u1.hilo[1]);
 		res = dlm_get_lock_resource(dlm, &q, flags);
 	rdtsc(u2.hilo[0], u2.hilo[1]);
-	printk("dlm_get_lock_resource took %llu cycles\n", u2.q-u1.q);
+	dlmprintk("dlm_get_lock_resource took %llu cycles\n", u2.q-u1.q);
 }
 		if (!res) {
 			status = DLM_IVLOCKID;
@@ -355,7 +355,7 @@
 	tmplock->cookie = dlm_next_cookie;
 	dlm_next_cookie++;
 	if (dlm_next_cookie & 0xff00000000000000ull) {
-		printk("eek! this node's cookie will now wrap!\n");
+		dlmprintk0("eek! this node's cookie will now wrap!\n");
 		dlm_next_cookie = 1;
 	}
 	c[7] = (u8)(tmplock->node & 0x00ff);
@@ -420,7 +420,7 @@
 	list_add_tail(&lock->list, &res->granted);
 
 	if (dlm_do_ast(dlm, res, lock) < 0)
-		printk("eek\n");
+		dlmprintk0("eek\n");
 	got_it = 1;
 
 done:
@@ -487,7 +487,7 @@
 		status = dlmconvert_remote(dlm, res, lock, flags, type);
 
 	rdtsc(u2.hilo[0], u2.hilo[1]);
-	printk("dlmconvert took %llu cycles\n", u2.q-u1.q);
+	dlmprintk("dlmconvert took %llu cycles\n", u2.q-u1.q);
 }
 	return status;
 }
@@ -505,7 +505,7 @@
 
 	/* already converting? */
 	if (lock->convert_type != LKM_IVMODE) {
-		printk("attempted to convert a lock with a lock conversion pending\n");
+		dlmprintk0("attempted to convert a lock with a lock conversion pending\n");
 		spin_unlock(&lock->spinlock);
 		spin_unlock(&res->spinlock);
 		return DLM_DENIED;
@@ -513,7 +513,7 @@
 
 	/* must be on grant queue to convert */
 	if (!dlm_lock_on_list(&res->granted, lock)) {
-		printk("attempted to convert a lock not on grant queue\n");
+		dlmprintk0("attempted to convert a lock not on grant queue\n");
 		spin_unlock(&lock->spinlock);
 		spin_unlock(&res->spinlock);
 		return DLM_DENIED;
@@ -550,7 +550,7 @@
 		dlmprintk0("doing in-place convert for nonlocal lock\n");
 
 	/* immediately grant the new lock type */
-	//printk("doing in-place %sconvert from %d to %d\n", 
+	//dlmprintk("doing in-place %sconvert from %d to %d\n", 
 	//       type > lock->type ? "up" : "down", lock->type, type);
 	lock->type = type;
 	status = DLM_NORMAL;
@@ -561,7 +561,7 @@
 		lock->lksb->status = DLM_NORMAL;
 
 	if (dlm_do_ast(dlm, res, lock) < 0)
-		printk("eek\n");
+		dlmprintk0("eek\n");
 
 	spin_unlock(&lock->spinlock);
 	spin_unlock(&res->spinlock);
@@ -613,7 +613,7 @@
 	list_del(&lock->list);
 	list_add_tail(&lock->list, &res->converting);
 	if (lock->convert_type != LKM_IVMODE) {
-		printk("error! converting a remote lock that is already converting!\n");
+		dlmprintk0("error! converting a remote lock that is already converting!\n");
 		/* TODO: return correct error */
 		BUG();
 	}
@@ -660,7 +660,7 @@
 		return DLM_BADPARAM;
 
 	if ((flags & (LKM_VALBLK | LKM_CANCEL)) == (LKM_VALBLK | LKM_CANCEL)) {
-		printk("VALBLK given with CANCEL: ignoring VALBLK\n");
+		dlmprintk0("VALBLK given with CANCEL: ignoring VALBLK\n");
 		flags &= ~LKM_VALBLK;
 	}
 
@@ -735,7 +735,7 @@
 		} else {
 			/* err. um. eek! */
 			dlmprintk0("on NO list!\n");
-			printk("lock to cancel is not on any list!  bug!\n");
+			dlmprintk0("lock to cancel is not on any list!  bug!\n");
 			lksb->status = DLM_IVLOCKID;
 			status = DLM_IVLOCKID;
 			free_lock = 0;
@@ -867,7 +867,7 @@
 				ret = status;
 			lksb->status = status;
 		} else {
-			printk("error occurred in net_send_message: %d\n", tmpret);
+			dlmprintk("error occurred in net_send_message: %d\n", tmpret);
 			ret = dlm_err_to_dlm_status(tmpret);
 			lksb->status = ret;
 		}
@@ -920,7 +920,7 @@
 		spin_unlock(&res->spinlock);
 	}
 	if (!found)
-		printk("failed to find lock to unlock!  cookie=%llu\n", unlock->cookie);
+		dlmprintk("failed to find lock to unlock!  cookie=%llu\n", unlock->cookie);
 	else
 		status = lksb.status;
 
@@ -998,13 +998,13 @@
 	char *netbuf;
 
 	if (strlen(domain) > NM_MAX_NAME_LEN) {
-		printk("domain name length too long\n");
+		dlmprintk0("domain name length too long\n");
 		goto leave;
 	}
 
 	group = nm_get_group_by_name(group_name);
 	if (!group) {
-		printk("no nm group %s for domain %s!\n", group_name, domain);
+		dlmprintk("no nm group %s for domain %s!\n", group_name, domain);
 		goto leave;
 	}
 
@@ -1025,7 +1025,7 @@
 
 	dlm = kmalloc(sizeof(dlm_ctxt), GFP_KERNEL);
 	if (dlm == NULL) {
-		printk("could not allocate dlm_ctxt\n");
+		dlmprintk0("could not allocate dlm_ctxt\n");
 		goto leave;
 	}
 	memset(dlm, 0, sizeof(dlm_ctxt));
@@ -1033,7 +1033,7 @@
 	if (dlm->name == NULL) {
 		kfree(dlm);
 		dlm = NULL;
-		printk("could not allocate dlm domain name\n");
+		dlmprintk0("could not allocate dlm domain name\n");
 		goto leave;
 	}
 	dlm->net_buf = (char *) __get_free_page(GFP_KERNEL);
@@ -1041,7 +1041,7 @@
 		kfree(dlm->name);
 		kfree(dlm);
 		dlm = NULL;
-		printk("could not allocate dlm network temporary buffer\n");
+		dlmprintk0("could not allocate dlm network temporary buffer\n");
 		goto leave;
 	}
 	dlm->resources = (struct list_head *) __get_free_page(GFP_KERNEL);
@@ -1050,7 +1050,7 @@
 		kfree(dlm);
 		free_page((unsigned long)dlm->net_buf);
 		dlm = NULL;
-		printk("could not allocate dlm hash\n");
+		dlmprintk0("could not allocate dlm hash\n");
 		goto leave;
 	}
 	memset(dlm->resources, 0, PAGE_SIZE);
@@ -1198,7 +1198,7 @@
 	if (tmpret)
 		goto error;
 	netbuf += L1_CACHE_ALIGN(sizeof(dlm_proxy_ast));
-// printk("netbuf=%p net_buf=%p diff=%d\n", netbuf, dlm->net_buf, ((char *)netbuf - (char *)dlm->net_buf));   // currently 768
+// dlmprintk("netbuf=%p net_buf=%p diff=%d\n", netbuf, dlm->net_buf, ((char *)netbuf - (char *)dlm->net_buf));   // currently 768
 	
 	tmpret = dlm_launch_thread(dlm);
 	if (tmpret == 0)
@@ -1297,7 +1297,7 @@
 		return dlm_send_proxy_ast(dlm, res, lock, DLM_AST, 0);
 	}
 	if (!fn) {
-		printk("eek! lock has no ast %*s!  cookie=%llu\n", 
+		dlmprintk("eek! lock has no ast %*s!  cookie=%llu\n", 
 		       res->lockname.len, res->lockname.name, lock->cookie);
 		return -EINVAL;
 	}
@@ -1317,7 +1317,7 @@
 	}
 
 	if (!fn) {
-		printk("eek! lock has no bast %*s!  cookie=%llu\n", 
+		dlmprintk("eek! lock has no bast %*s!  cookie=%llu\n", 
 		       res->lockname.len, res->lockname.name, lock->cookie);
 		return -EINVAL;
 	}
@@ -1347,7 +1347,7 @@
 		iput(inode);
 	}
 	if (ret < 0) {
-		printk("(%d) dlm_send_proxy_ast: returning %d\n", current->pid, ret);
+		dlmprintk("(%d) dlm_send_proxy_ast: returning %d\n", current->pid, ret);
 	}
 	return ret;
 }
@@ -1369,14 +1369,14 @@
 
 	if (past->type != DLM_AST && 
 	    past->type != DLM_BAST) {
-		printk("Eeeek unknown ast type! %d, cookie=%llu, name=%*s\n", 
+		dlmprintk("Eeeek unknown ast type! %d, cookie=%llu, name=%*s\n", 
 		       past->type, cookie, lockname.len, lockname.name);
 		return 0;
 	}
 
 	res = dlm_lookup_lock(dlm, &lockname);
 	if (!res) {
-		printk("eek! got %sast for unknown lockres!  cookie=%llu, name=%*s, namelen=%d\n", 
+		dlmprintk("eek! got %sast for unknown lockres!  cookie=%llu, name=%*s, namelen=%d\n", 
 		       past->type == DLM_AST ? "" : "b", cookie, lockname.len, lockname.name, lockname.len);
 		return 0;
 	}
@@ -1406,7 +1406,7 @@
 			goto do_ast;
 	}
 
-	printk("eek! got %sast for unknown lock!  cookie=%llu, name=%*s, namelen=%d\n", 
+	dlmprintk("eek! got %sast for unknown lock!  cookie=%llu, name=%*s, namelen=%d\n", 
 	       past->type == DLM_AST ? "" : "b", cookie, lockname.len, lockname.name, lockname.len);
 	spin_unlock(&res->spinlock);
 	if (!dlm_is_recovery_lock(past->name, past->namelen))
@@ -1440,7 +1440,7 @@
 	}
 
 	if (status < 0)
-		printk("eeek: ast/bast returned %d\n", status);
+		dlmprintk("eeek: ast/bast returned %d\n", status);
 
 	spin_unlock(&res->spinlock);
 	if (!dlm_is_recovery_lock(past->name, past->namelen))
@@ -1487,7 +1487,7 @@
 			// successfully sent and received
 			ret = status;  // this is already a dlm_status
 		} else {
-			printk("error occurred in net_send_message: %d\n", tmpret);
+			dlmprintk("error occurred in net_send_message: %d\n", tmpret);
 			ret = dlm_err_to_dlm_status(tmpret);
 		}
 		iput(inode);
@@ -1573,7 +1573,7 @@
 			// successfully sent and received
 			ret = status;  // this is already a dlm_status
 		} else {
-			printk("error occurred in net_send_message: %d\n", tmpret);
+			dlmprintk("error occurred in net_send_message: %d\n", tmpret);
 			ret = dlm_err_to_dlm_status(tmpret);
 		}
 		iput(inode);
@@ -1624,7 +1624,7 @@
 		spin_unlock(&res->spinlock);
 	}
 	if (!found)
-		printk("failed to find lock to convert on grant queue!  cookie=%llu\n", convert->cookie);
+		dlmprintk("failed to find lock to convert on grant queue!  cookie=%llu\n", convert->cookie);
 
 	rdtsc(u7.hilo[0], u7.hilo[1]);
 	dlmprintk("1-2:%llu 2-3:%llu 3-4:%llu 4-5:%llu 5-6:%llu 6-7:%llu\n",
@@ -1637,7 +1637,7 @@
 	dlm_ctxt *dlm;
 	struct list_head *iter;
 
-	printk("dumping ALL dlm state for node %s\n", system_utsname.nodename);
+	dlmprintk("dumping ALL dlm state for node %s\n", system_utsname.nodename);
 	spin_lock(&dlm_domain_lock);
 	list_for_each(iter, &dlm_domains) {
 		dlm = list_entry (iter, dlm_ctxt, list);
@@ -1654,10 +1654,10 @@
 	struct list_head *bucket;
 	int i;
 
-	printk("dlm_ctxt: %s, group=%u, key=%u\n", dlm->name, dlm->group_index, dlm->key);
-	printk("some bug here... should not have to check for this...\n");
+	dlmprintk("dlm_ctxt: %s, group=%u, key=%u\n", dlm->name, dlm->group_index, dlm->key);
+	dlmprintk0("some bug here... should not have to check for this...\n");
 	if (!dlm || !dlm->name) {
-		printk("wtf... dlm=%p\n", dlm);
+		dlmprintk("wtf... dlm=%p\n", dlm);
 		return;
 	}
 		
@@ -1666,30 +1666,30 @@
 		bucket = &(dlm->resources[i]);
 		list_for_each(iter, bucket) {
 			res = list_entry(iter, dlm_lock_resource, list);
-			printk("lockres: %*s, owner=%u, state=%u\n", res->lockname.len, res->lockname.name,
+			dlmprintk("lockres: %*s, owner=%u, state=%u\n", res->lockname.len, res->lockname.name,
 			       res->owner, res->state);
 			spin_lock(&res->spinlock);
-			printk("  granted queue: \n");
+			dlmprintk0("  granted queue: \n");
 			list_for_each(iter2, &res->granted) {
 				lock = list_entry(iter2, dlm_lock, list);
 				spin_lock(&lock->spinlock);
-				printk("    type=%d, conv=%d, node=%u, cookie=%llu\n", 
+				dlmprintk("    type=%d, conv=%d, node=%u, cookie=%llu\n", 
 				       lock->type, lock->convert_type, lock->node, lock->cookie);
 				spin_unlock(&lock->spinlock);
 			}
-			printk("  converting queue: \n");
+			dlmprintk0("  converting queue: \n");
 			list_for_each(iter2, &res->converting) {
 				lock = list_entry(iter2, dlm_lock, list);
 				spin_lock(&lock->spinlock);
-				printk("    type=%d, conv=%d, node=%u, cookie=%llu\n", 
+				dlmprintk("    type=%d, conv=%d, node=%u, cookie=%llu\n", 
 				       lock->type, lock->convert_type, lock->node, lock->cookie);
 				spin_unlock(&lock->spinlock);
 			}
-			printk("  blocked queue: \n");
+			dlmprintk0("  blocked queue: \n");
 			list_for_each(iter2, &res->blocked) {
 				lock = list_entry(iter2, dlm_lock, list);
 				spin_lock(&lock->spinlock);
-				printk("    type=%d, conv=%d, node=%u, cookie=%llu\n", 
+				dlmprintk("    type=%d, conv=%d, node=%u, cookie=%llu\n", 
 				       lock->type, lock->convert_type, lock->node, lock->cookie);
 				spin_unlock(&lock->spinlock);
 			}

Modified: trunk/cluster/dlmmod.h
===================================================================
--- trunk/cluster/dlmmod.h	2004-12-09 23:19:25 UTC (rev 1701)
+++ trunk/cluster/dlmmod.h	2004-12-14 05:49:17 UTC (rev 1702)
@@ -34,8 +34,8 @@
 #define dlmprintk(x, arg...)
 #define dlmprintk0(x)
 #else
-#define dlmprintk(x, arg...)    printk("(%d)(%s:%d) " x, current->pid, __FUNCTION__, __LINE__, ##arg)
-#define dlmprintk0(x)           printk("(%d)(%s:%d) " x, current->pid, __FUNCTION__, __LINE__)
+#define dlmprintk(x, arg...)    printk("(dlm:%d)(%s:%d) " x, current->pid, __FUNCTION__, __LINE__, ##arg)
+#define dlmprintk0(x)           printk("(dlm:%d)(%s:%d) " x, current->pid, __FUNCTION__, __LINE__)
 #endif
 
 

Modified: trunk/cluster/dlmrecovery.c
===================================================================
--- trunk/cluster/dlmrecovery.c	2004-12-09 23:19:25 UTC (rev 1701)
+++ trunk/cluster/dlmrecovery.c	2004-12-14 05:49:17 UTC (rev 1702)
@@ -84,22 +84,22 @@
 /* Launch the recovery thread */
 int dlm_launch_recovery_thread(dlm_ctxt *dlm)
 {
-	printk("starting recovery thread...\n");
+	dlmprintk0("starting recovery thread...\n");
 	dlm->reco.thread.pid = kernel_thread (dlm_recovery_thread, dlm, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
 	if (dlm->reco.thread.pid < 0) {
-		printk("unable to launch recovery thread, error=%d", dlm->reco.thread.pid);
+		dlmprintk("unable to launch recovery thread, error=%d", dlm->reco.thread.pid);
 		return -EINVAL;
 	}
-	printk("recovery thread running...\n");
+	dlmprintk0("recovery thread running...\n");
 	return 0;
 }
 
 void dlm_complete_recovery_thread(dlm_ctxt *dlm)
 {
-	printk ("waiting for recovery thread to exit....");
+	dlmprintk0 ("waiting for recovery thread to exit....");
 	send_sig (SIGINT, dlm->reco.thread.task, 0);
 	wait_for_completion (&dlm->reco.thread.complete);
-	printk ("recovery thread exited\n");
+	dlmprintk0 ("recovery thread exited\n");
 	dlm->reco.thread.task = NULL;
 }
 
@@ -150,7 +150,7 @@
 		/* check to see if the new master has died */
 		if (dlm->reco.new_master != NM_INVALID_SLOT_NUM &&
 		    test_bit(dlm->reco.new_master, dlm->recovery_map)) {
-			printk("new master %u died while recovering %u!\n",
+			dlmprintk("new master %u died while recovering %u!\n",
 			       dlm->reco.new_master, dlm->reco.dead_node);
 			// unset the new_master, leave dead_node
 			dlm->reco.new_master = NM_INVALID_SLOT_NUM;
@@ -163,7 +163,7 @@
 				dlm->reco.dead_node = NM_INVALID_SLOT_NUM;
 		} else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
 			// BUG?
-			printk("dead_node %u no longer in recovery map!\n",
+			dlmprintk("dead_node %u no longer in recovery map!\n",
 			       dlm->reco.dead_node);
 			dlm->reco.dead_node = NM_INVALID_SLOT_NUM;
 		}
@@ -171,7 +171,7 @@
 		spin_unlock(&dlm->spinlock);
 
 		if (dlm->reco.dead_node == NM_INVALID_SLOT_NUM) {
-			printk("nothing to recover!  sleeping now!\n");
+			dlmprintk0("nothing to recover!  sleeping now!\n");
 			goto sleep;
 		}
 
@@ -203,7 +203,7 @@
 		if (dlm->reco.new_master == dlm->group_index) {
 			status = dlm_remaster_locks_local(dlm);
 			if (status < 0) {
-				printk("error remastering locks for node %u!!!!  retrying!\n",
+				dlmprintk("error remastering locks for node %u!!!!  retrying!\n",
 				       dlm->reco.dead_node);
 			} else {
 				// success!  see if any other nodes need recovery
@@ -221,7 +221,7 @@
 		} else {
 			// sit around until new_master is dead or done
 			// we will get signalled by the waitqueue either way
-			printk("new_master %u is recovering dead_node %u... waiting...\n",
+			dlmprintk("new_master %u is recovering dead_node %u... waiting...\n",
 			       dlm->reco.new_master, dlm->reco.dead_node);
 		}
 
@@ -234,18 +234,18 @@
 					     1, DLM_RECOVERY_THREAD_MS);
 		if (status == 0 || status == -ETIMEDOUT) {
 			if (atomic_read(&dlm->reco.thread.woken))
-				printk("aha!!! recovery thread woken!\n");
+				dlmprintk0("aha!!! recovery thread woken!\n");
 			else 
-				printk("timed out waiting, running again\n");
+				dlmprintk0("timed out waiting, running again\n");
 			continue;
 		}
-		printk("recovery thread got %d while waiting\n", status);
+		dlmprintk("recovery thread got %d while waiting\n", status);
 		break;
 	}
 
 	flush_scheduled_work();
 	complete (&dlm->reco.thread.complete);
-	printk("quitting recovery thread!!!!!!\n");
+	dlmprintk0("quitting recovery thread!!!!!!\n");
 	return 0;
 }
 
@@ -308,7 +308,7 @@
 
 int dlm_request_all_locks(dlm_ctxt *dlm, u16 request_from, u16 dead_node)
 {
-	printk("dlm_request_all_locks: dead node is %u, sending request to %u\n",
+	dlmprintk("dlm_request_all_locks: dead node is %u, sending request to %u\n",
 	       dead_node, request_from);
 	// send message
 	// sleep until all received or error
@@ -399,20 +399,20 @@
 	/* check a bunch of ugly conditions */
 	*out_of_order = 0;
 	if (req->num_locks > DLM_LOCKS_PER_PACKET) {
-		printk("num_locks too large! %u\n", req->num_locks);
+		dlmprintk("num_locks too large! %u\n", req->num_locks);
 	} else if (req->seqnum != dlm->reco.next_seq) {
-		printk("expected seq %lu from node %u, got %lu\n",
+		dlmprintk("expected seq %lu from node %u, got %lu\n",
 		       dlm->reco.next_seq, msg->src_node,
 		       req->seqnum);
 		*out_of_order = 1;
 	} else if (dlm->reco.dead_node != req->dead_node) {
-		printk("bad lock array: dead node=%u, sent=%u\n",
+		dlmprintk("bad lock array: dead node=%u, sent=%u\n",
 		       dlm->reco.dead_node != req->dead_node);
 	} else if (dlm->reco.new_master != dlm->group_index) {
-		printk("this node is not the recovery master!\n");
+		dlmprintk0("this node is not the recovery master!\n");
 	} else if (dlm->reco.sending_node != msg->src_node ||
 		 dlm->group_index == msg->dest_node) {
-		printk("eek. sending_node=%u, actual=%u, dest=%u, me=%u\n",
+		dlmprintk("eek. sending_node=%u, actual=%u, dest=%u, me=%u\n",
 		       dlm->reco.sending_node, msg->src_node, 
 		       msg->dest_node, dlm->group_index);
 	} else
@@ -449,7 +449,7 @@
 
 	newlocks = kmalloc(req->num_locks * sizeof(dlm_lock *), GFP_KERNEL);
 	if (!newlocks) {
-		printk("failed to alloc temp lock array!\n");
+		dlmprintk0("failed to alloc temp lock array!\n");
 		ret = -ENOMEM;
 		goto send_response;
 	}
@@ -606,14 +606,14 @@
 	spin_lock(&dlm->spinlock);
 
 	if (!test_bit(idx, dlm->node_map))
-		printk("node %u already removed from nodemap!\n", idx);
+		dlmprintk("node %u already removed from nodemap!\n", idx);
 	else {
-		printk("node %u being removed from nodemap!\n", idx);
+		dlmprintk("node %u being removed from nodemap!\n", idx);
 		clear_bit(idx, dlm->node_map);
 	}
 	
 	if (test_bit(idx, dlm->recovery_map))
-		printk("node %u already added to recovery map!\n", idx);
+		dlmprintk("node %u already added to recovery map!\n", idx);
 	else {
 		set_bit(idx, dlm->recovery_map);
 		dlm_do_local_recovery_cleanup(dlm, idx, 1);
@@ -630,10 +630,10 @@
 	spin_lock(&dlm->spinlock);
 
 	if (test_bit(idx, dlm->recovery_map)) {
-		printk("BUG!!! node up message on node in recovery (%u)!!!\n", idx);
+		dlmprintk("BUG!!! node up message on node in recovery (%u)!!!\n", idx);
 	} else {
 		if (test_bit(idx, dlm->node_map))
-			printk("node %u already in node map!!!\n", idx);
+			dlmprintk("node %u already in node map!!!\n", idx);
 		else {
 			dlmprintk("node %u being added to node map!!!\n", idx);
 			set_bit(idx, dlm->node_map);

Modified: trunk/cluster/dlmthread.c
===================================================================
--- trunk/cluster/dlmthread.c	2004-12-09 23:19:25 UTC (rev 1701)
+++ trunk/cluster/dlmthread.c	2004-12-14 05:49:17 UTC (rev 1702)
@@ -86,7 +86,7 @@
 		list_for_each(iter, &res->blocked) {
 			b++;
 		}
-		printk("(%d) granted: %d, converting: %d, blocked: %d\n", current->pid, g, c, b);
+		dlmprintk("(%d) granted: %d, converting: %d, blocked: %d\n", current->pid, g, c, b);
 	}
 #endif
 
@@ -95,7 +95,7 @@
 		goto blocked;
 	target = list_entry(res->converting.next, dlm_lock, list);
 	if (target->convert_type == LKM_IVMODE) {
-		printk("eeek!!! converting a lock with no convert_type!!!!\n");
+		dlmprintk0("eeek!!! converting a lock with no convert_type!!!!\n");
 		BUG();
 	}
 	head = &res->granted;
@@ -106,8 +106,8 @@
 		if (!dlm_lock_compatible(lock->type, target->convert_type)) {
 			if (lock->highest_blocked == LKM_IVMODE)
 				list_add(&lock->ast_list, &bast_list);
-			if (lock->highest_blocked < target->type)
-				lock->highest_blocked = lock->type;
+			if (lock->highest_blocked < target->convert_type)
+				lock->highest_blocked = target->convert_type;
 		}
 	}
 	head = &res->converting;
@@ -118,8 +118,8 @@
 		if (!dlm_lock_compatible(lock->type, target->convert_type)) {
 			if (lock->highest_blocked == LKM_IVMODE)
 				list_add(&lock->ast_list, &bast_list);
-			if (lock->highest_blocked < target->type)
-				lock->highest_blocked = lock->type;
+			if (lock->highest_blocked < target->convert_type)
+				lock->highest_blocked = target->convert_type;
 		}
 	}
 	
@@ -138,7 +138,6 @@
 
 		if (target->node == dlm->group_index) {
 			DLM_ASSERT(target->lksb);
-			DLM_ASSERT(target->lksb->status);
 
 			target->lksb->status = DLM_NORMAL;
 		} else {
@@ -148,7 +147,7 @@
 		spin_unlock(&target->spinlock);
 
 		if (dlm_do_ast(dlm, res, target) < 0)
-			printk("eek\n");
+			dlmprintk0("eek\n");
 		/* go back and check for more */
 		goto converting;
 	}
@@ -168,7 +167,7 @@
 			if (lock->highest_blocked == LKM_IVMODE)
 				list_add(&lock->ast_list, &bast_list);
 			if (lock->highest_blocked < target->type)
-				lock->highest_blocked = lock->type;
+				lock->highest_blocked = target->type;
 		}
 	}
 
@@ -181,7 +180,7 @@
 			if (lock->highest_blocked == LKM_IVMODE)
 				list_add(&lock->ast_list, &bast_list);
 			if (lock->highest_blocked < target->type)
-				lock->highest_blocked = lock->type;
+				lock->highest_blocked = target->type;
 		}
 	}
 	
@@ -200,7 +199,6 @@
 
 		if (target->node == dlm->group_index) {
 			DLM_ASSERT(target->lksb);
-			DLM_ASSERT(target->lksb->status);
 		
 			target->lksb->status = DLM_NORMAL;
 		} else {
@@ -210,7 +208,7 @@
 		spin_unlock(&target->spinlock);
 
 		if (dlm_do_ast(dlm, res, target) < 0)
-			printk("eek\n");
+			dlmprintk0("eek\n");
 		/* go back and check for more */
 		goto converting;
 	}
@@ -226,7 +224,7 @@
 		spin_unlock(&lock->spinlock);
 
 		if (dlm_do_bast(dlm, res, lock, hi) < 0)
-			printk("eeek\n");
+			dlmprintk0("eeek\n");
 	}
 	spin_unlock(&res->spinlock);
 }
@@ -254,22 +252,22 @@
 /* Launch the NM thread for the mounted volume */
 int dlm_launch_thread(dlm_ctxt *dlm)
 {
-	printk("starting dlm thread...\n");
+	dlmprintk0("starting dlm thread...\n");
 	dlm->thread.pid = kernel_thread (dlm_thread, dlm, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
 	if (dlm->thread.pid < 0) {
-		printk("unable to launch dlm thread, error=%d", dlm->thread.pid);
+		dlmprintk("unable to launch dlm thread, error=%d", dlm->thread.pid);
 		return -EINVAL;
 	}
-	printk("dlm thread running for %s...\n", dlm->name);
+	dlmprintk("dlm thread running for %s...\n", dlm->name);
 	return 0;
 }
 
 void dlm_complete_thread(dlm_ctxt *dlm)
 {
-	printk ("waiting for dlm thread to exit....");
+	dlmprintk0 ("waiting for dlm thread to exit....");
 	send_sig (SIGINT, dlm->thread.task, 0);
 	wait_for_completion (&dlm->thread.complete);
-	printk ("dlm thread exited\n");
+	dlmprintk0 ("dlm thread exited\n");
 	dlm->thread.task = NULL;
 }
 
@@ -311,19 +309,19 @@
 		if (status == 0 || status == -ETIMEDOUT) {
 #if 0
 			if (atomic_read(&dlm->thread.woken))
-				printk("aha!!! dlm thread woken!\n");
+				dlmprintk0("aha!!! dlm thread woken!\n");
 			else 
-				printk("timed out waiting, running again\n");
+				dlmprintk0("timed out waiting, running again\n");
 #endif
 			continue;
 		}
 	
-		printk("DLM thread got %d while waiting\n", status);
+		dlmprintk("DLM thread got %d while waiting\n", status);
 		break;
 	}
 
 	flush_scheduled_work();
 	complete (&dlm->thread.complete);
-	printk("quitting DLM thread!!!!!!\n");
+	dlmprintk0("quitting DLM thread!!!!!!\n");
 	return 0;
 }
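
To spell out the second item in the log: when dlm_shuffle_lists tries to grant the
lock at the head of the converting or blocked queue, it walks the granted and
converting lists, queues a bast for every holder that is incompatible with the mode
the waiter wants, and records the highest mode each holder is blocking. The old code
stored the holder's own mode (lock->type); the hunks above store the requested mode
instead (target->convert_type on the converting pass, target->type on the blocked
pass). A rough sketch of the corrected bookkeeping, factored into a hypothetical
helper dlm_note_blocked() purely for illustration (dlm_lock and LKM_IVMODE are the
definitions from dlmmod.h):

#include <linux/list.h>

static void dlm_note_blocked(dlm_lock *lock, int requested_mode,
			     struct list_head *bast_list)
{
	/* first time this holder blocks anyone: queue a bast for it */
	if (lock->highest_blocked == LKM_IVMODE)
		list_add(&lock->ast_list, bast_list);

	/* remember the highest mode this holder is blocking -- the mode
	 * the waiter asked for, not the holder's own granted mode */
	if (lock->highest_blocked < requested_mode)
		lock->highest_blocked = requested_mode;
}

The converting pass would call dlm_note_blocked(lock, target->convert_type, &bast_list)
and the blocked pass dlm_note_blocked(lock, target->type, &bast_list), which is what
the four one-line changes above amount to.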

Modified: trunk/cluster/heartbeat.c
===================================================================
--- trunk/cluster/heartbeat.c	2004-12-09 23:19:25 UTC (rev 1701)
+++ trunk/cluster/heartbeat.c	2004-12-14 05:49:17 UTC (rev 1702)
@@ -40,6 +40,7 @@
 #include <linux/init.h>
 #include <linux/sysctl.h>
 #include <linux/random.h>
+#include <linux/version.h>
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
 #include <linux/statfs.h>
 #include <linux/moduleparam.h>
@@ -83,7 +84,7 @@
 static void hb_nm_group_node_del_cb(void *ptr1, void *ptr2, u16 idx);
 static void hb_nm_node_add_cb(void *ptr1, void *ptr2, u16 idx);
 static void hb_nm_group_add_cb(void *ptr1, void *ptr2, u16 idx);
-static int hb_init_disk_hb_group(struct inode *group, kdev_t dev, u32 bits, u32 blocks, u64 start);
+static int hb_init_disk_hb_group(struct inode *group, dev_t dev, u32 bits, u32 blocks, u64 start);
 static ssize_t write_disk(struct file *file, char *buf, size_t size);
 static void hb_do_callbacks(int type, void *ptr1, void *ptr2, int idx);
 static void hb_end_buffer_io_sync(struct buffer_head *bh, int uptodate);
@@ -115,13 +116,23 @@
 static wait_queue_head_t hb_cb_wq;
 static atomic_t hb_cb_ready = ATOMIC_INIT(0);
 
+//#if 0
+#define hbprintk(x, arg...)    printk("(hb:%d) " x, current->pid, ##arg)
+#define hbprintk0(x)           printk("(hb:%d) " x, current->pid)
+//#else
+#if 0
+#define hbprintk(x, arg...)    
+#define hbprintk0(x)           
+#endif
 
+
+
 static void hb_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
 {
 	if (uptodate)
 		set_buffer_uptodate(bh);
 	else {
-		printk("eek!  EIO!\n");
+		hbprintk("eek!  EIO!\n");
 		clear_buffer_uptodate(bh);
 	}
 	unlock_buffer(bh);
@@ -131,9 +142,9 @@
 
 static int hb_do_node_down(struct inode *group, struct inode *node, int idx)
 {
-	int ret;
-	printk("hb_do_node_down: group=%lu, node=%lu\n", group->i_ino, node->i_ino);
-	printk("NOT removing node from group\n");
+	//int ret;
+	hbprintk("hb_do_node_down: group=%lu, node=%lu\n", group->i_ino, node->i_ino);
+	hbprintk("NOT removing node from group\n");
 	//ret = nm_remove_node_from_group(group, node);
 	hb_do_callbacks(HB_NODE_DOWN_CB, group, node, idx);
 	return 0;
@@ -141,16 +152,20 @@
 
 static int hb_do_node_up(struct inode *group, struct inode *node, int idx)
 {
-	printk("hb_do_node_up: group=%lu, node=%lu\n", group->i_ino, node->i_ino);
+	hbprintk("hb_do_node_up: group=%lu, node=%lu\n", group->i_ino, node->i_ino);
 	hb_do_callbacks(HB_NODE_UP_CB, group, node, idx);
 	return 0;
 }
 
 static inline void hb_submit_bh(int rw, struct buffer_head *bh)
 {
-	printk("submit_bh: rw=%s, blocknr=%lu, mapped=%s\n",
-	       rw==WRITE?"write":"read", bh->b_blocknr, 
+#if 0
+	unsigned long long blocknr = bh->b_blocknr;
+
+	hbprintk("submit_bh: rw=%s, blocknr=%llu, mapped=%s\n",
+	       rw==WRITE?"write":"read", blocknr, 
 	       buffer_mapped(bh) ? "yes" : "no");
+#endif
 	submit_bh(rw, bh);
 }
 
@@ -192,7 +207,7 @@
 			node = slot->inode;
 
 			if (!node) {
-				printk("no inode in slot %d!\n", idx);
+				hbprintk("no inode in slot %d!\n", idx);
 				idx++;
 				continue;
 			}
@@ -241,7 +256,7 @@
 			bh = slot->bh;
 			node = slot->inode;
 			if (!node) {
-				printk("no inode in slot %d!\n", idx);
+				hbprintk("no inode in slot %d!\n", idx);
 				idx++;
 				continue;
 			}
@@ -252,7 +267,7 @@
 			hb_block = (hb_disk_heartbeat_block *)bh->b_data;
 			if (hb_block->time != slot->last_time) {
 				if (slot->state == HB_NODE_STATE_INIT) {
-					printk("first time for this node!\n");
+					hbprintk("first time for this node!\n");
 					live_nodes[ino] = node;
 					slot->state = HB_NODE_STATE_UP;
 				}
@@ -262,11 +277,11 @@
 				hb_do_callbacks(HB_NODE_RESPONDED_CB, group, node, idx);
 			} else {
 				slot->margin--;
-				printk("node %d missed.  margin=%d\n", ino, slot->margin);
+				hbprintk("node %d missed.  margin=%d\n", ino, slot->margin);
 			}
 
 			if (ino != nm_this_node(group) && slot->margin <= 0) {
-				printk("node %d JUST DIED!!!!\n", ino);
+				hbprintk("node %d JUST DIED!!!!\n", ino);
 				dead_nodes[ino] = node;
 				slot->state = HB_NODE_STATE_DOWN;
 			}
@@ -316,19 +331,19 @@
 		if (status == 0 || status == -ETIMEDOUT) {
 #if 0
 			if (atomic_read(&hb_thread_woken))
-				printk("aha!!! hb thread woken!\n");
+				hbprintk("aha!!! hb thread woken!\n");
 			else 
-				printk("hb thread timed out waiting, running again\n");
+				hbprintk("hb thread timed out waiting, running again\n");
 #endif
 			continue;
 		}
-		printk("hb thread got %d while waiting\n", status);
+		hbprintk("hb thread got %d while waiting\n", status);
 		break;
 	}
 
 	flush_scheduled_work();
 	complete (&hb_complete);
-	printk("quitting hb thread!!!!!!\n");
+	hbprintk("quitting hb thread!!!!!!\n");
 	return 0;
 }
 
@@ -346,22 +361,22 @@
 	hb_task = NULL;
 	init_completion (&hb_complete);
 
-	printk("starting hb thread...\n");
+	hbprintk("starting hb thread...\n");
 	hb_pid = kernel_thread (hb_thread, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
 	if (hb_pid < 0) {
-		printk("unable to launch hb thread, error=%d", hb_pid);
+		hbprintk("unable to launch hb thread, error=%d", hb_pid);
 		return -EINVAL;
 	}
-	printk("hb thread running...\n");
+	hbprintk("hb thread running...\n");
 	return 0;
 }
 
 static void hb_complete_thread(void)
 {
-	printk ("waiting for hb thread to exit....");
+	hbprintk ("waiting for hb thread to exit....");
 	send_sig (SIGINT, hb_task, 0);
 	wait_for_completion (&hb_complete);
-	printk ("hb thread exited\n");
+	hbprintk ("hb thread exited\n");
 	hb_task = NULL;
 }
 
@@ -371,7 +386,7 @@
 
 
 
-static int hb_init_disk_hb_group(struct inode *group, kdev_t dev, u32 bits, u32 blocks, u64 start)
+static int hb_init_disk_hb_group(struct inode *group, dev_t dev, u32 bits, u32 blocks, u64 start)
 {
 	int ret = -EINVAL;
 	cluster_disk *disk;
@@ -412,12 +427,12 @@
 	hb_op *data;
 	struct inode *group = NULL;
 	struct file *filp = NULL;
-	kdev_t dev;
+	dev_t dev;
 	int ret, tmpret;
 	nm_group_inode_private *priv;
 	u32 tmpmap[8];
 	
-	printk("write_disk\n");
+	hbprintk("write_disk\n");
 
         if (size < sizeof(*data))
                 return -EINVAL;
@@ -510,7 +525,7 @@
 	ops->num_ops = HB_WriteOpArraySize;
 	ops->write_op[HB_Disk] = write_disk;
 
-	printk("calling simple_fill_super...\n");
+	hbprintk("calling simple_fill_super...\n");
 	ret = simple_fill_super(sb, 0x5551212f, hb_files);
 	if (ret >= 0)
 		TA_GENERIC_SB_MEMBER(sb) = ops;
@@ -521,7 +536,7 @@
 
 static struct super_block *hb_read_super (struct super_block *sb, void *data, int silent)
 {
-	printk("welcome to hb_read_super!!!\n");
+	hbprintk("welcome to hb_read_super!!!\n");
 	return (hb_fill_super(sb, data, silent) < 0) ? NULL : sb;
 }
 
@@ -540,33 +555,33 @@
 	int ino, ret = 0;
 	u64 block;
 
-	printk("hb_nm_group_node_add_cb: group=%lu, node=%lu, idx=%u\n",
+	hbprintk("hb_nm_group_node_add_cb: group=%lu, node=%lu, idx=%u\n",
 	       group->i_ino, node->i_ino, idx);
 
 	down(&group->i_sem);	
 	priv = group->u.generic_ip;
 	if (!priv) {
-		printk("eek! bad group inode!\n");
+		hbprintk("eek! bad group inode!\n");
 		goto leave;
 	}
 	disk = &priv->disk;
 	if (disk->uuid[0]) {
 		ret = util_resize_rarray(&disk->slots, idx+1);
 		if (ret < 0) {
-			printk("eeeeeeek!!!! failed to resize disk state data\n");
+			hbprintk("eeeeeeek!!!! failed to resize disk state data\n");
 			goto leave;
 		}
 	
 		ino = nm_get_node_global_index(node);
 		if (ino > disk->num_blocks) {
-			printk("disk heartbeat area does not have enough blocks!\n");
+			hbprintk("disk heartbeat area does not have enough blocks!\n");
 			goto leave;
 		}
 		block = ino + disk->start_block;
 	
 		slot = util_rarray_idx_to_slot(&disk->slots, idx);
 		if (!slot) {
-			printk("eeeeeeek!!!! failed to get disk state data pointer: %d\n", idx);
+			hbprintk("eeeeeeek!!!! failed to get disk state data pointer: %d\n", idx);
 			goto leave;
 		}
 		slot->inode = igrab(node);
@@ -576,7 +591,7 @@
 		slot->bh = getblk(disk->dev, (int)block, (1 << disk->blocksize_bits));
 		slot->state = HB_NODE_STATE_INIT;
 	} else {
-		printk("doing nothing for group add for non-disk heartbeat group\n");
+		hbprintk("doing nothing for group add for non-disk heartbeat group\n");
 	}
 	
 leave:
@@ -593,23 +608,23 @@
 	nm_group_inode_private *priv;
 	int ret = -EINVAL;
 
-	printk("hb_nm_group_node_del_cb: group=%lu, node=%lu, idx=%u\n",
+	hbprintk("hb_nm_group_node_del_cb: group=%lu, node=%lu, idx=%u\n",
 	       group->i_ino, node->i_ino, idx);
 
 	down(&group->i_sem);
 	priv = group->u.generic_ip;
 	if (!priv) {
-		printk("eek! bad group inode!\n");
+		hbprintk("eek! bad group inode!\n");
 		goto leave;
 	}
 	disk = &priv->disk;
 	slot = util_rarray_idx_to_slot(&disk->slots, idx);
 	if (!slot) {
-		printk("eeeeeeek!!!! failed to get disk state data pointer: %d\n", idx);
+		hbprintk("eeeeeeek!!!! failed to get disk state data pointer: %d\n", idx);
 		goto leave;
 	}
 	if (slot->inode!=node) {
-		printk("eeeeeeek!!!! node inode changed!\n");
+		hbprintk("eeeeeeek!!!! node inode changed!\n");
 		goto leave;
 	}
 	iput(node);
@@ -622,7 +637,7 @@
 leave:
 
 	up(&group->i_sem);
-	printk("hb_nm_group_node_del_cb done: %d\n", ret);
+	hbprintk("hb_nm_group_node_del_cb done: %d\n", ret);
 	return;
 }
 
@@ -636,19 +651,19 @@
 	struct inode *group = ptr1;
 	nm_group_inode_private *priv;
 
-	printk("hb_nm_group_add_cb: group=%lu, idx=%u\n",
+	hbprintk("hb_nm_group_add_cb: group=%lu, idx=%u\n",
 	       group->i_ino, idx);
 	
 	priv = group->u.generic_ip;
 	if (!priv) {
-		printk("eek! bad group inode!\n");
+		hbprintk("eek! bad group inode!\n");
 		return;
 	}
 
 	spin_lock(&hb_lock);
 	list_add_tail(&priv->net_list, &hb_net_groups);
 	if (priv->disk.uuid[0]) {
-		printk("adding priv=%p inode=%p to disk group list\n", priv, group);
+		hbprintk("adding priv=%p inode=%p to disk group list\n", priv, group);
 		list_add_tail(&priv->disk_list, &hb_disk_groups);
 	}
 	spin_unlock(&hb_lock);
@@ -662,7 +677,7 @@
 static int __init init_hb(void)
 {
 	int retval=-1, i;
-	printk("loading heartbeat module: nodename is %s\n", nm_nodename);
+	hbprintk("loading heartbeat module: nodename is %s\n", nm_nodename);
 
 	if (proc_mkdir("cluster/heartbeat", 0)) {
 		// ???
@@ -710,7 +725,7 @@
 	hb_complete_thread();
 	hb_teardown();
 	unregister_filesystem(&hb_fs_type);
-	printk("unloading heartbeat module\n");
+	hbprintk("unloading heartbeat module\n");
 }
 
 static void hb_teardown(void)
@@ -742,7 +757,7 @@
 	if (priv->disk.uuid[0]) {
 		while ((slot = nm_iterate_group_disk_slots(group, &idx))) {
 			if (idx >= size-1) {
-				printk("map size (%d) too small for idx (%d)\n",
+				hbprintk("map size (%d) too small for idx (%d)\n",
 			       	size, idx);
 				up(&group->i_sem);
 				return -EINVAL;
@@ -752,7 +767,7 @@
 			idx++;
 		}
 	} else {
-		printk("filling straight from slot bitmap for non-disk heartbeat group\n");
+		hbprintk("filling straight from slot bitmap for non-disk heartbeat group\n");
 		memcpy(map, priv->slot_bitmap, size);
 	}
 
@@ -860,7 +875,7 @@
 	ret = hb_wait_on_callback_state(type);
 	if (ret < 0) {
 		spin_unlock(&hb_cb_lock);
-		printk("missed hb callback(%d) due to EINTR!\n", type);
+		hbprintk("missed hb callback(%d) due to EINTR!\n", type);
 		return;
 	}
 	hb_callback_state[type] = HB_CB_STATE_FROZEN;

Modified: trunk/cluster/nodemanager.c
===================================================================
--- trunk/cluster/nodemanager.c	2004-12-09 23:19:25 UTC (rev 1701)
+++ trunk/cluster/nodemanager.c	2004-12-14 05:49:17 UTC (rev 1702)
@@ -184,6 +184,15 @@
 #define NM_HASH_SIZE     (1 << NM_HASH_BITS)
 #define NM_HASH_MASK     (NM_HASH_SIZE - 1)
 
+
+
+#define nmprintk(x, arg...)    printk("(nm:%d) " x, current->pid, ##arg)
+#define nmprintk0(x)           printk("(nm:%d) " x, current->pid)
+#if 0
+#define nmprintk(x, arg...)
+#define nmprintk0(x)
+#endif
+
 static struct list_head *nm_ip_hash = NULL;
 static spinlock_t nm_ip_hash_lock;
 
@@ -207,7 +216,7 @@
 	int i;
 	
 	if ((PAGE_SIZE / sizeof(struct list_head)) < NM_HASH_SIZE) {
-		printk("eek!  hash size too big for this arch!\n");
+		nmprintk("eek!  hash size too big for this arch!\n");
 		BUG();
 	}
 
@@ -251,7 +260,7 @@
 		goto out;
 	name.name = file->name;
 	name.len = strlen(name.name);
-	printk("adding file %*s\n", name.len, name.name);
+	nmprintk("adding file %*s\n", name.len, name.name);
 	name.hash = full_name_hash(name.name, name.len);
 	dentry = d_alloc(parent, &name);
 	if (!dentry) {
@@ -294,23 +303,23 @@
 		goto out;
 	name.name = file->name;
 	name.len = strlen(name.name);
-	printk("adding link %*s\n", name.len, name.name);
+	nmprintk("adding link %*s\n", name.len, name.name);
 	name.hash = full_name_hash(name.name, name.len);
 	dentry = d_alloc(parent, &name);
 	if (!dentry) {
-		printk("failed to d_alloc\n");
+		nmprintk("failed to d_alloc\n");
 		dentry = ERR_PTR(-EINVAL);
 		goto out;
 	}
 	inode = iget(s, ino);
 	if (!inode) {
-		printk("failed to iget\n");
+		nmprintk("failed to iget\n");
 		dput(dentry);
 		dentry = ERR_PTR(-EINVAL);
 		goto out;
 	}
 	if (!inode->u.generic_ip) {
-		printk("bad inode: %d\n", ino);
+		nmprintk("bad inode: %d\n", ino);
 		iput(inode);
 		dput(dentry);
 		dentry = ERR_PTR(-EINVAL);
@@ -334,7 +343,7 @@
 {
 	int ret = -EINVAL;
 	
-	printk("create cluster...\n");
+	nmprintk("create cluster...\n");
 	
 	spin_lock(&nm_lock);
 	if (cluster.state == NM_CLUSTER_UP) {
@@ -358,7 +367,7 @@
 	int ret = -EINVAL;
 	nm_group_inode_private *g = NULL;
 
-	printk("create group...\n");
+	nmprintk("create group...\n");
 
 	data->arg_u.gc.name[NM_MAX_NAME_LEN] = '\0';
 	inode = nm_get_group_by_name(data->arg_u.gc.name);
@@ -378,7 +387,7 @@
 	spin_unlock(&cluster.bitmap_lock);
 
 	if (group_num < 0) {
-		printk("out of group slots!\n");
+		nmprintk("out of group slots!\n");
 		goto leave;
 	}
 
@@ -392,7 +401,7 @@
 		goto leave;
 	inode = igrab(dentry->d_inode);
 	if (!inode) {
-		printk("igrab failed!\n");
+		nmprintk("igrab failed!\n");
 		goto leave;
 	}
 		
@@ -437,7 +446,7 @@
 	int ret = -EINVAL;
 	nm_node_inode_private *n = NULL;
 
-	printk("add cluster node ...\n");
+	nmprintk("add cluster node ...\n");
 
 	data->arg_u.node.node_name[NM_MAX_NAME_LEN] = '\0';
 	inode = nm_get_node_by_name(data->arg_u.node.node_name);
@@ -451,7 +460,7 @@
 
 	node_num = data->arg_u.node.node_num;
 	if (node_num > NM_INVALID_SLOT_NUM) {
-		printk("bad node_num: %d\n", node_num);
+		nmprintk("bad node_num: %d\n", node_num);
 		goto leave;
 	}
 
@@ -460,7 +469,7 @@
 	spin_unlock(&cluster.bitmap_lock);
 
 	if (node_num < 0) {
-		printk("out of node slots!\n");
+		nmprintk("out of node slots!\n");
 		goto leave;
 	}
 
@@ -471,18 +480,18 @@
 	desc.mode = S_IFREG | S_IWUSR;
 	dentry = nm_add_file(single_sb, single_sb->s_root, &desc, ino);
 	if (IS_ERR(dentry)) {
-		printk("bad dentry\n");
+		nmprintk("bad dentry\n");
 		goto leave;
 	}
 	inode = igrab(dentry->d_inode);
 	if (!inode) {
-		printk("igrab failed!\n");
+		nmprintk("igrab failed!\n");
 		goto leave;
 	}
 		
 	n = kmalloc(sizeof(nm_node_inode_private), GFP_KERNEL);
 	if (!n) {
-		printk("could not kmalloc\n");
+		nmprintk("could not kmalloc\n");
 		goto leave;
 	}
 	memcpy(&n->node, &data->arg_u.node, sizeof(nm_node_info));
@@ -497,7 +506,7 @@
 	bucket = hash_long(n->node.ifaces[0].addr_u.ip_addr4, NM_HASH_BITS);
 	list_add_tail(&n->ip_hash, &nm_ip_hash[bucket]);
 	spin_unlock(&nm_ip_hash_lock);
-	printk("hashed ip %d.%d.%d.%d to bucket %d\n", NIPQUAD(n->node.ifaces[0].addr_u.ip_addr4), bucket);
+	nmprintk("hashed ip %d.%d.%d.%d to bucket %d\n", NIPQUAD(n->node.ifaces[0].addr_u.ip_addr4), bucket);
 	n->inode = inode;
 	inode->u.generic_ip = n;
 
@@ -537,7 +546,7 @@
 	u16 ino;
 	char tmpname[6];
 
-	printk("add node to group...\n");
+	nmprintk("add node to group...\n");
 
 	group_num = data->arg_u.gc.group_num;
 	ino = data->arg_u.gc.node_num;
@@ -581,7 +590,7 @@
 	child = nm_add_link(single_sb, dentry, &desc, 
 			    NM_NODE_INODE_START+ino);
 	if (IS_ERR(child)) {
-		printk("error adding link for %s\n", tmpname);
+		nmprintk("error adding link for %s\n", tmpname);
 		child = NULL;
 		goto leave;
 	}
@@ -612,7 +621,7 @@
 	int slot_num;
 	int ret = -EINVAL;
 
-	printk("remove node from group...\n");
+	nmprintk("remove node from group...\n");
 
 	slot_num = nm_get_group_index(group, node, &child);
 
@@ -623,12 +632,12 @@
 	if (!g)
 		goto leave;
 		
-	printk("killing the dentry now!!\n");
+	nmprintk("killing the dentry now!!\n");
 	down(&group->i_zombie);
 	node->i_nlink--;
 	d_delete(child);
 	up(&group->i_zombie);
-	printk("done killing the dentry!!\n");
+	nmprintk("done killing the dentry!!\n");
 
 
 	if (!igrab(node))
@@ -654,7 +663,7 @@
 {
 	int ret = -EINVAL;
 
-	printk("name cluster...\n");
+	nmprintk("name cluster...\n");
 	spin_lock(&nm_lock);
 	if (cluster.state == NM_CLUSTER_UP) {
 		ret = sprintf(buf, "%d: cluster name could not be set.  cluster already up.", -EINVAL);
@@ -671,7 +680,7 @@
 int nm_destroy_cluster(char *buf)
 {
 	int ret;
-	printk("destroy cluster...\n");
+	nmprintk("destroy cluster...\n");
 
 	/* TODO */
 	spin_lock(&nm_lock);
@@ -685,7 +694,7 @@
 {
 	int num_nodes=0, i;
 	
-	printk("get cluster num nodes...\n");
+	nmprintk("get cluster num nodes...\n");
 
 	spin_lock(&cluster.bitmap_lock);
 	for (i=0; i<8; i++)
@@ -699,7 +708,7 @@
 {
 	int num_groups=0, i;
 	
-	printk("get cluster num groups...\n");
+	nmprintk("get cluster num groups...\n");
 
 	spin_lock(&cluster.bitmap_lock);
 	for (i=0; i<8; i++)
@@ -714,7 +723,7 @@
 	int num_nodes=0, i;
 	nm_group_inode_private *g;
 	
-	printk("get group num nodes...\n");
+	nmprintk("get group num nodes...\n");
 	
 	g = group->u.generic_ip;
 	if (!g)
@@ -733,7 +742,7 @@
 	int last=0, i;
 	nm_group_inode_private *g;
 	
-	printk("get group num nodes...\n");
+	nmprintk("get group num nodes...\n");
 	
 	g = group->u.generic_ip;
 	if (!g)
@@ -794,7 +803,7 @@
 		for (i=0; i<NM_MAX_IFACES; i++) {
 			n = &priv->node.ifaces[i];
 			vers = ntohs(n->ip_version);
-			printk("ip_version=%u, vers=%u\n", n->ip_version, vers);
+			nmprintk("ip_version=%u, vers=%u\n", n->ip_version, vers);
 			if (vers!=4 && vers!=6)
 				continue;
 			/* TODO: how to print ipv6? */
@@ -853,7 +862,7 @@
 	int ret;
 	u16 me;
 	
-	printk("write_cluster\n");
+	nmprintk("write_cluster\n");
 
         if (size < sizeof(*data))
                 return -EINVAL;
@@ -891,7 +900,7 @@
 			ret = sprintf(buf, "%d: bad opcode: %u", -EINVAL, data->opcode);
 			break;
 	}
-	printk("leaving!\n");
+	nmprintk("leaving!\n");
 	return ret;
 }
 
@@ -900,7 +909,7 @@
 	nm_op *data;
 	int ret;
 	
-	printk("write_node\n");
+	nmprintk("write_node\n");
 
         if (size < sizeof(*data))
                 return -EINVAL;
@@ -916,7 +925,7 @@
 			ret = sprintf(buf, "%d: bad opcode: %u", -EINVAL, data->opcode);
 			break;
 	}
-	printk("leaving!\n");
+	nmprintk("leaving!\n");
 	return ret;
 }
 
@@ -925,7 +934,7 @@
 	nm_op *data;
 	int ret;
 	
-	printk("write_group\n");
+	nmprintk("write_group\n");
 
         if (size < sizeof(*data))
                 return -EINVAL;
@@ -933,7 +942,7 @@
 	if (data->magic != NM_OP_MAGIC)
 		return -EINVAL;
 
-	printk("opcode is %u, add_group is %u\n", data->opcode, NM_OP_ADD_GROUP_NODE);
+	nmprintk("opcode is %u, add_group is %u\n", data->opcode, NM_OP_ADD_GROUP_NODE);
 	switch (data->opcode) {
 		case NM_OP_GET_GROUP_INFO:
 			ret = nm_get_group_info(buf, data);
@@ -947,7 +956,7 @@
 			ret = sprintf(buf, "%d: bad opcode: %u", -EINVAL, data->opcode);
 			break;
 	}
-	printk("leaving!\n");
+	nmprintk("leaving!\n");
 	return ret;
 }
 
@@ -1043,7 +1052,7 @@
 		if (inode) {
 			inode = igrab(inode);
 			if (!inode->u.generic_ip || !S_ISREG (inode->i_mode)) {
-				printk("bad inode!\n");
+				nmprintk("bad inode!\n");
 				iput(inode);
 				inode = NULL;
 			}
@@ -1069,7 +1078,7 @@
 			if (!inode->u.generic_ip ||
 		    	(dir && !S_ISDIR (inode->i_mode)) ||
 		    	(!dir && !S_ISREG (inode->i_mode))) {
-				printk("bad inode!\n");
+				nmprintk("bad inode!\n");
 				iput(inode);
 				inode = NULL;
 			}
@@ -1102,7 +1111,7 @@
 	}
 	iput(inode);
 	dput(child);
-	//printk("for group=%p, this node is %u\n", group, node_num);
+	//nmprintk("for group=%p, this node is %u\n", group, node_num);
 	return node_num;
 }
 
@@ -1263,7 +1272,7 @@
 	ops->write_op[NM_Group] = write_group;
 
 	single_sb = NULL;
-	printk("calling simple_fill_super...\n");
+	nmprintk("calling simple_fill_super...\n");
 	ret = simple_fill_super(sb, 0x98675309, nm_files);
 	if (ret >= 0) {
 		TA_GENERIC_SB_MEMBER(sb) = ops;
@@ -1276,7 +1285,7 @@
 
 static struct super_block *nm_read_super (struct super_block *sb, void *data, int silent)
 {
-	printk("welcome to nm_read_super!!!\n");
+	nmprintk("welcome to nm_read_super!!!\n");
 	return (nm_fill_super(sb, data, silent) < 0) ? NULL : sb;
 }
 
@@ -1288,14 +1297,14 @@
 	int retval;
 	nm_nodename = kmalloc(strlen(system_utsname.nodename) + 1, GFP_KERNEL);
 	if (nm_nodename==NULL) {
-		printk("could not allocate a few bytes for nodename!\n");
+		nmprintk("could not allocate a few bytes for nodename!\n");
 		return -ENOMEM;
 	}
 	strcpy(nm_nodename, system_utsname.nodename);
-	printk("loading nm module: nodename is %s\n", nm_nodename);
+	nmprintk("loading nm module: nodename is %s\n", nm_nodename);
 
 	if (nm_init_ip_hash() < 0) {
-		printk("failed to allocate node IP hash\n");
+		nmprintk("failed to allocate node IP hash\n");
 		return -ENOMEM;
 	}
 
@@ -1305,9 +1314,9 @@
 		if (proc_mkdir("cluster/nm", 0)) {
 		}
 	}
-	printk("calling register_filesystem\n");
+	nmprintk("calling register_filesystem\n");
 	retval = register_filesystem(&nm_fs_type);
-	printk("done calling register_filesystem: ret=%d\n", retval);
+	nmprintk("done calling register_filesystem: ret=%d\n", retval);
 	if (retval)
 		nm_teardown();
 	return retval;
@@ -1319,7 +1328,7 @@
 	unregister_filesystem(&nm_fs_type);
 	nm_destroy_ip_hash();
 	kfree(nm_nodename);
-	printk("unloading nm module\n");
+	nmprintk("unloading nm module\n");
 }
 
 

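The nmprintk()/nmprintk0() wrappers added above, like the hbprintk() and netprintk() counterparts in heartbeat.c and tcp.c, all follow the same idiom: a variadic macro that prefixes every printk() with the subsystem tag and the calling pid, kept next to a disabled pair that can be switched in to compile the messages out. A minimal, self-contained sketch of that idiom, using a placeholder "foo" prefix rather than any name from this commit:

    #include <linux/kernel.h>   /* printk */
    #include <linux/sched.h>    /* current->pid */

    /* enabled variant: tag every message with the subsystem and caller pid */
    #define fooprintk(x, arg...)   printk("(foo:%d) " x, current->pid, ##arg)
    #define fooprintk0(x)          printk("(foo:%d) " x, current->pid)

    #if 0
    /* disabled variant: commenting out the pair above and flipping this
     * #if to 1 turns every fooprintk() call site into nothing */
    #define fooprintk(x, arg...)
    #define fooprintk0(x)
    #endif

Usage mirrors printk() itself, e.g. fooprintk("ret=%d\n", ret) or fooprintk0("done\n"), which is why the conversions above are mostly mechanical.
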
Modified: trunk/cluster/tcp.c
===================================================================
--- trunk/cluster/tcp.c	2004-12-09 23:19:25 UTC (rev 1701)
+++ trunk/cluster/tcp.c	2004-12-14 05:49:17 UTC (rev 1702)
@@ -55,8 +55,8 @@
 #include "nodemanager.h"
 
 //#if 0
-#define netprintk(x, arg...)    printk("(%d) " x, current->pid, ##arg)
-#define netprintk0(x)           printk("(%d) " x, current->pid)
+#define netprintk(x, arg...)    printk("(tcp:%d) " x, current->pid, ##arg)
+#define netprintk0(x)           printk("(tcp:%d) " x, current->pid)
 //#else
 #if 0
 #define netprintk(x, arg...)    
@@ -885,7 +885,7 @@
 	spin_lock(&net_msg_num_lock);
 	msg->msg_num = net_msg_num;
 	if (net_msg_num == NET_MSG_NUM_MAX) {
-		printk("eek!  net_msg_num wrapping to 1 now...\n");
+		netprintk0("eek!  net_msg_num wrapping to 1 now...\n");
 		net_msg_num = 1;
 	}
 	spin_unlock(&net_msg_num_lock);
@@ -1028,7 +1028,7 @@
 rdtsc(u2.hilo[0], u2.hilo[1]);
 			net_do_status_return(hdr.msg_num, hdr.status);
 rdtsc(u3.hilo[0], u3.hilo[1]);
-printk("status return: net_dump_msg took %llu, net_do_status_return took %llu\n", u2.q-u1.q, u3.q-u2.q);
+netprintk("status return: net_dump_msg took %llu, net_do_status_return took %llu\n", u2.q-u1.q, u3.q-u2.q);
 			err = 0;
 			goto error;
 		} else if (hdr.magic != NET_MSG_MAGIC) {
@@ -1059,11 +1059,11 @@
 			netprintk0("no handler for message.\n");
 			goto error;
 		}
-printk("about to dispatch message\n");		
+netprintk0("about to dispatch message\n");		
 rdtsc(u1.hilo[0], u1.hilo[1]);
 		err = net_dispatch_message(inode, sock, &hdr, hnd);
 rdtsc(u2.hilo[0], u2.hilo[1]);
-printk("net_dispatch_message took %llu\n", u2.q-u1.q);
+netprintk("net_dispatch_message took %llu\n", u2.q-u1.q);
 
 		/* if node has requested status return, do it now */
 		if (hdr.status) {
@@ -1074,11 +1074,11 @@
 #endif
 			hdr.status = err;
 			hdr.magic = NET_MSG_STATUS_MAGIC;  // twiddle the magic
-printk("about to send return message, status=%d\n", err);
+netprintk("about to send return message, status=%d\n", err);
 rdtsc(u3.hilo[0], u3.hilo[1]);
 			tmperr = net_send_tcp_msg(inode, sock, &hdr, sizeof(net_msg));
 rdtsc(u4.hilo[0], u4.hilo[1]);
-printk("status return (net_send_tcp_msg) took %llu\n", u4.q-u3.q);
+netprintk("status return (net_send_tcp_msg) took %llu\n", u4.q-u3.q);
 		} else if (err < 0) {
 			netprintk("dispatch (%u/%u) returned %d\n",
 				  hdr.msg_type, hdr.key, err);
@@ -1091,7 +1091,7 @@
 		spin_lock(&net_list_lock);
 		list_add_tail(&net->list, &net_recv_list);
 		spin_unlock(&net_list_lock);
-printk("all done with this round, starting over\n");		
+netprintk0("all done with this round, starting over\n");		
 		goto start_over;
 
 error:

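The timing lines above (rdtsc(u1.hilo[0], u1.hilo[1]) ... u2.q - u1.q) read the x86 timestamp counter into a union that overlays two 32-bit halves with one 64-bit value, so deltas can be printed with a single %llu. A sketch of that pattern, assuming the rdtsc(low, high) macro from <asm/msr.h> on 2.4/2.6-era i386 and little-endian layout; the type and helper names are illustrative, not taken from tcp.c:

    #include <linux/types.h>    /* u32, u64 */
    #include <asm/msr.h>        /* rdtsc(low, high) on i386 */

    /* hilo[0] holds the low 32 bits (EAX), hilo[1] the high 32 bits (EDX);
     * on little-endian x86 the overlay makes .q the full 64-bit count */
    typedef union {
            u32 hilo[2];
            u64 q;
    } tsc_stamp;

    static inline void tsc_read(tsc_stamp *t)
    {
            rdtsc(t->hilo[0], t->hilo[1]);
    }

    static inline u64 tsc_delta(const tsc_stamp *start, const tsc_stamp *end)
    {
            return end->q - start->q;
    }

A caller takes one stamp before and one after the interesting call and hands tsc_delta() straight to netprintk() with %llu, which is the shape of the instrumentation in the receive path above.
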
Modified: trunk/cluster/tcp.h
===================================================================
--- trunk/cluster/tcp.h	2004-12-09 23:19:25 UTC (rev 1701)
+++ trunk/cluster/tcp.h	2004-12-14 05:49:17 UTC (rev 1702)
@@ -31,6 +31,7 @@
 #include <linux/socket.h>
 #ifdef __KERNEL__
 #include <net/sock.h>
+#include <linux/tcp.h>
 #else
 #include <sys/socket.h>
 #endif
@@ -162,8 +163,14 @@
 static inline int net_link_down(int err, struct socket *sock)
 {
 	if (sock) {
+/* the alternative is to #define around the members */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
 		if (sock->sk->state != TCP_ESTABLISHED &&
 	    	    sock->sk->state != TCP_CLOSE_WAIT)
+#else
+		if (sock->sk->sk_state != TCP_ESTABLISHED &&
+	    	    sock->sk->sk_state != TCP_CLOSE_WAIT)
+#endif
 			return 1;
 	}
 

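The net_link_down() hunk above handles the 2.4 to 2.6 rename of struct sock's state member (state became sk_state) by duplicating the condition under LINUX_VERSION_CODE, and the in-line comment notes the alternative of "#define around the members". A minimal sketch of that alternative, as a hypothetical compat header that is not part of this commit:

    /* hypothetical sock_compat.h -- illustration only */
    #ifndef CLUSTER_SOCK_COMPAT_H
    #define CLUSTER_SOCK_COMPAT_H

    #include <linux/version.h>
    #include <net/sock.h>

    #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
    /* map the 2.6 member name onto the 2.4 one so callers can be written
     * once against sock->sk->sk_state */
    #define sk_state state
    #endif

    #endif /* CLUSTER_SOCK_COMPAT_H */

With that mapping, a single sock->sk->sk_state comparison would compile on both trees; the patch keeps the explicit #if instead, which is noisier at the call site but avoids having the sk_state token silently rewritten anywhere else it might appear.
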
Modified: trunk/cluster/warning_hack.h
===================================================================
--- trunk/cluster/warning_hack.h	2004-12-09 23:19:25 UTC (rev 1701)
+++ trunk/cluster/warning_hack.h	2004-12-14 05:49:17 UTC (rev 1702)
@@ -33,7 +33,6 @@
 
 extern __inline__ int generic_fls(int x);
 extern __inline__ int get_bitmask_order(unsigned int count);
-extern inline void mark_info_dirty(struct mem_dqinfo *info);
 extern inline int rq_data_dir(struct request *rq);
 	
 

Modified: trunk/src/dlmglue.c
===================================================================
--- trunk/src/dlmglue.c	2004-12-09 23:19:25 UTC (rev 1701)
+++ trunk/src/dlmglue.c	2004-12-14 05:49:17 UTC (rev 1702)
@@ -213,12 +213,15 @@
 	ocfs2_lvb *lvb = (ocfs2_lvb *) lockres->l_lksb.lvb;
 	int ret = 0;
 
+	LOG_ENTRY();
+	
 	spin_lock(&lockres->l_lock);
 	if (lvb->lvb_seq &&
 	    lockres->l_local_seq == lvb->lvb_seq)
 		ret = 1;
 	spin_unlock(&lockres->l_lock);
 
+	LOG_EXIT_STATUS(ret);
 	return ret;
 }
 
@@ -226,10 +229,14 @@
 {
 	ocfs2_lvb *lvb = (ocfs2_lvb *) lockres->l_lksb.lvb;
 
+	LOG_ENTRY();
+
 	spin_lock(&lockres->l_lock);
 	if (lvb->lvb_seq)
 		lockres->l_local_seq = lvb->lvb_seq;
 	spin_unlock(&lockres->l_lock);
+
+	LOG_EXIT();
 }
 
 /* fill in new values as we add them to the lvb. */
@@ -237,6 +244,9 @@
 					     unsigned int *trunc_clusters)
 {
 	ocfs2_meta_lvb *lvb;
+	
+	LOG_ENTRY();
+	
 	OCFS_ASSERT(lockres->l_type == OCFS_TYPE_DATA);
 
 	spin_lock(&lockres->l_lock);
@@ -247,6 +257,8 @@
 		*trunc_clusters = lvb->lvb_trunc_clusters;
 
 	spin_unlock(&lockres->l_lock);
+
+	LOG_EXIT();
 }
 
 static int ocfs2_build_lock_name(enum ocfs2_lock_type type,
@@ -287,6 +299,7 @@
 				       enum ocfs2_lock_type type,
 				       void *priv)
 {
+	LOG_ENTRY();
 	memset(res, 0, sizeof(ocfs2_lock_res));
 	spin_lock_init(&res->l_lock);
 	init_waitqueue_head(&res->l_event);
@@ -294,6 +307,7 @@
 	res->l_level = LKM_IVMODE;
 	INIT_LIST_HEAD(&res->l_blocked_list);
 	res->l_priv = priv;
+	LOG_EXIT();
 }
 
 int ocfs2_inode_lock_res_init(ocfs2_lock_res *res,
@@ -356,13 +370,17 @@
 
 void ocfs2_lock_res_free(ocfs2_lock_res *res)
 {
+	LOG_ENTRY();
 	if (res->l_name)
 		kfree(res->l_name);
+	LOG_EXIT();
 }
 
 static inline void ocfs2_inc_holders(ocfs2_lock_res *lockres,
 				     int level)
 {
+	LOG_ENTRY();
+
 	OCFS_ASSERT(lockres);
 
 	switch(level) {
@@ -375,11 +393,14 @@
 	default:
 		BUG();
 	}
+	LOG_EXIT();
 }
 
 static inline void ocfs2_dec_holders(ocfs2_lock_res *lockres,
 				     int level)
 {
+	LOG_ENTRY();
+
 	OCFS_ASSERT(lockres);
 
 	switch(level) {
@@ -394,10 +415,13 @@
 	default:
 		BUG();
 	}
+	LOG_EXIT();
 }
 
 static inline void ocfs2_generic_handle_downconvert_action(ocfs2_lock_res *lockres)
 {
+	LOG_ENTRY();
+	
 	OCFS_ASSERT(lockres->l_flags & OCFS2_LOCK_BUSY);
 	OCFS_ASSERT(lockres->l_flags & OCFS2_LOCK_ATTACHED);
 	OCFS_ASSERT(lockres->l_flags & OCFS2_LOCK_BLOCKED);
@@ -407,13 +431,19 @@
 	lockres->l_flags &= ~OCFS2_LOCK_BLOCKED;
 	lockres->l_flags &= ~OCFS2_LOCK_BUSY;
 	wake_up(&lockres->l_event);
+
+	LOG_EXIT();
 }
 
 static void ocfs2_inc_inode_seq(ocfs_super *osb,
 				struct inode *inode)
 {
-	atomic_t *seq = GET_INODE_CLEAN_SEQ(inode);
+	atomic_t *seq;
+       	
+	LOG_ENTRY();
 
+	seq = GET_INODE_CLEAN_SEQ(inode);
+
 	LOG_TRACE_ARGS("incrementing inode seq... current is %d\n", 
 		       atomic_read(seq));
 
@@ -431,10 +461,14 @@
 
 	LOG_TRACE_ARGS("done incrementing inode seq... new is %d\n", 
 		       atomic_read(seq));
+
+	LOG_EXIT();
 }
 
 static inline void ocfs2_generic_handle_convert_action(ocfs2_lock_res *lockres)
 {
+	LOG_ENTRY();
+
 	OCFS_ASSERT(lockres->l_flags & OCFS2_LOCK_BUSY);
 	OCFS_ASSERT(lockres->l_flags & OCFS2_LOCK_ATTACHED);
 
@@ -447,6 +481,7 @@
 
 	lockres->l_level = lockres->l_requested;
 	lockres->l_flags &= ~OCFS2_LOCK_BUSY;
+	LOG_EXIT();
 }
 
 static inline void ocfs2_handle_meta_convert_action(struct inode *inode,
@@ -454,14 +489,20 @@
 {
 	ocfs_super *osb = OCFS2_SB(inode->i_sb);
 
+	LOG_ENTRY();
+
 	/* generic_handle_convert_action will set the refresh flag for us. */
 	if (lockres->l_level == LKM_NLMODE)
 		ocfs2_inc_inode_seq(osb, inode);
 	ocfs2_generic_handle_convert_action(lockres);
+
+	LOG_EXIT();
 }
 
 static inline void ocfs2_generic_handle_attach_action(ocfs2_lock_res *lockres)
 {
+	LOG_ENTRY();
+
 	OCFS_ASSERT(lockres->l_flags & OCFS2_LOCK_BUSY);
 	OCFS_ASSERT(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
 
@@ -472,15 +513,22 @@
 	lockres->l_level = lockres->l_requested;
 	lockres->l_flags |= OCFS2_LOCK_ATTACHED;
 	lockres->l_flags &= ~OCFS2_LOCK_BUSY;
+
+	LOG_EXIT();
 }
 
 static void ocfs2_inode_ast_func(void *opaque)
 {
 	ocfs2_lock_res *lockres = opaque;
-	struct inode *inode = ocfs2_lock_res_inode(lockres);
-	ocfs_super *osb = OCFS2_SB(inode->i_sb);
+	struct inode *inode;
+	ocfs_super *osb;
 	dlm_lockstatus *lksb;
 
+	LOG_ENTRY();
+	
+	inode = ocfs2_lock_res_inode(lockres);
+	osb = OCFS2_SB(inode->i_sb);
+
 #ifdef OCFS2_VERBOSE_LOCKING_TRACE
 	printk("AST fired for inode %llu\n", OCFS_I(inode)->ip_blkno);
 #endif
@@ -492,6 +540,7 @@
 		printk("ocfs2_inode_ast_func: lksb status value of %u on "
 		       "inode %llu\n", lksb->status, OCFS_I(inode)->ip_blkno);
 		spin_unlock(&lockres->l_lock);
+		LOG_EXIT();
 		return;
 	}
 
@@ -526,23 +575,34 @@
 	lockres->l_action = OCFS2_AST_INVALID;
 	spin_unlock(&lockres->l_lock);
 	wake_up(&lockres->l_event);
+
+	LOG_EXIT();
 }
 
 static void ocfs2_generic_handle_bast(ocfs2_lock_res *lockres, int level)
 {
+	LOG_ENTRY();
+
 	spin_lock(&lockres->l_lock);
 	lockres->l_flags |= OCFS2_LOCK_BLOCKED;
 	if (level > lockres->l_blocking)
 		lockres->l_blocking = level;
 	spin_unlock(&lockres->l_lock);
+
+	LOG_EXIT();
 }
 
 static void ocfs2_inode_bast_func(void *opaque, int level)
 {
 	ocfs2_lock_res *lockres = opaque;
-	struct inode *inode = ocfs2_lock_res_inode(lockres);
-	ocfs_super *osb = OCFS2_SB(inode->i_sb);
+	struct inode *inode;
+	ocfs_super *osb;
 
+	LOG_ENTRY();
+
+	inode = ocfs2_lock_res_inode(lockres);
+	osb = OCFS2_SB(inode->i_sb);
+
 #ifdef OCFS2_VERBOSE_LOCKING_TRACE
 	printk("BAST fired for inode %llu\n", OCFS_I(inode)->ip_blkno);
 #endif
@@ -550,6 +610,7 @@
 
 	ocfs2_schedule_blocked_inode_lock(inode, lockres);
 	ocfs2_kick_vote_thread(osb);
+	LOG_EXIT();
 }
 
 static void ocfs2_super_ast_func(void *opaque)
@@ -557,8 +618,10 @@
 	ocfs2_lock_res *lockres = opaque;
 	dlm_lockstatus *lksb;
 
+	LOG_ENTRY();
+
 #ifdef OCFS2_VERBOSE_LOCKING_TRACE
-	printk("AST fired for inode %llu\n", OCFS_I(inode)->ip_blkno);
+	printk("Superblock AST fired\n");
 #endif
 	OCFS_ASSERT(ocfs2_is_super_lock(lockres));
 
@@ -589,13 +652,18 @@
 	lockres->l_action = OCFS2_AST_INVALID;
 	spin_unlock(&lockres->l_lock);
 	wake_up(&lockres->l_event);
+
+	LOG_EXIT();
 }
 
 static void ocfs2_super_bast_func(void *opaque, int level)
 {
 	ocfs2_lock_res *lockres = opaque;
-	ocfs_super *osb = ocfs2_lock_res_super(lockres);
+	ocfs_super *osb;
 
+	LOG_ENTRY();
+       	osb = ocfs2_lock_res_super(lockres);
+
 #ifdef OCFS2_VERBOSE_LOCKING_TRACE
 	printk("Superblock BAST fired\n");
 #endif
@@ -603,11 +671,14 @@
 
 	ocfs2_schedule_blocked_lock(osb, lockres);
 	ocfs2_kick_vote_thread(osb);
+
+	LOG_EXIT();
 }
 
 static inline void ocfs2_recover_from_dlm_error(ocfs2_lock_res *lockres,
 						int convert)
 {
+	LOG_ENTRY();
 	spin_lock(&lockres->l_lock);
 	lockres->l_flags &= ~OCFS2_LOCK_BUSY;
 	if (convert)
@@ -615,6 +686,7 @@
 	else
 		lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
 	spin_unlock(&lockres->l_lock);
+	LOG_EXIT();
 }
 
 static int ocfs2_lock_create(ocfs_super *osb,
@@ -661,34 +733,43 @@
 					int flag)
 {
 	int ret;
+	LOG_ENTRY();
 	spin_lock(&lockres->l_lock);
 	ret = lockres->l_flags & flag;
 	spin_unlock(&lockres->l_lock);
+	LOG_EXIT_STATUS(ret);
 	return ret;
 }
 
 static inline void ocfs2_wait_on_busy_lock(ocfs2_lock_res *lockres)
 
 {
+	LOG_ENTRY();
 	wait_event_interruptible(lockres->l_event,
 				 !ocfs2_check_wait_flag(lockres,
 							OCFS2_LOCK_BUSY));
+	LOG_EXIT();
 }
 
 static inline void ocfs2_wait_on_blocked_lock(ocfs2_lock_res *lockres)
 
 {
+	LOG_ENTRY();
 	wait_event_interruptible(lockres->l_event,
 				 !ocfs2_check_wait_flag(lockres,
 							OCFS2_LOCK_BLOCKED));
+	LOG_EXIT();
 }
 
 static inline void ocfs2_wait_on_refreshing_lock(ocfs2_lock_res *lockres)
 
 {
+	LOG_ENTRY();
 	wait_event_interruptible(lockres->l_event,
 				 !ocfs2_check_wait_flag(lockres,
-							OCFS2_LOCK_REFRESHING));}
+							OCFS2_LOCK_REFRESHING));
+	LOG_EXIT();
+}
 
 /* predict what lock level we'll be dropping down to on behalf
  * of another node, and return true if the currently wanted
@@ -797,10 +878,12 @@
 				 ocfs2_lock_res *lockres,
 				 int level)
 {
+	LOG_ENTRY();
 	spin_lock(&lockres->l_lock);
 	ocfs2_dec_holders(lockres, level);
 	ocfs2_vote_on_unlock(osb, lockres);
 	spin_unlock(&lockres->l_lock);
+	LOG_EXIT();
 }
 
 /* Grants us an EX lock on the data and metadata resources, skipping
@@ -889,6 +972,8 @@
 {
 	int kick = 0;
 
+	LOG_ENTRY();
+
 	/* If we know that another node is waiting on our lock, kick
 	 * the vote thread * pre-emptively when we reach a release
 	 * condition. */
@@ -909,6 +994,8 @@
 
 	if (kick)
 		ocfs2_kick_vote_thread(osb);
+
+	LOG_EXIT();
 }
 
 void ocfs2_data_unlock(struct inode *inode,
@@ -917,6 +1004,8 @@
 	int level = write ? LKM_EXMODE : LKM_PRMODE;
 	ocfs2_lock_res *lockres = &OCFS_I(inode)->ip_data_lockres;
 
+	LOG_ENTRY();
+
 #ifdef OCFS2_VERBOSE_LOCKING_TRACE
 	printk("ocfs2: (%u) inode %llu drop %s DATA lock\n",
 	       OCFS_I(inode)->ip_blkno, current->pid,
@@ -924,18 +1013,26 @@
 #endif
 
 	ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
+
+	LOG_EXIT();
 }
 
 static inline int ocfs2_wait_on_recovery(ocfs_super *osb)
 {
+	int ret;
+
+	LOG_ENTRY();
+
 	wait_event_interruptible(osb->recovery_event,
 				 ocfs_node_map_is_empty(osb,
 							&osb->recovery_map));
 
+	ret = 0;
 	if (signal_pending(current))
-		return -EINTR;
+		ret = -EINTR;
 
-	return 0;
+	LOG_EXIT_STATUS(ret);
+	return ret;
 }
 
 /* Call this with the lockres locked. I am reasonably sure we don't
@@ -947,6 +1044,8 @@
 	ocfs2_lock_res *lockres = &oip->ip_meta_lockres;
 	ocfs2_meta_lvb *lvb     = (ocfs2_meta_lvb *) lockres->l_lksb.lvb;
 
+	LOG_ENTRY();
+
 	lvb->lvb_iclusters = oip->ip_clusters;
 	lvb->lvb_iuid      = inode->i_uid;
 	lvb->lvb_igid      = inode->i_gid;
@@ -956,6 +1055,8 @@
 	lvb->lvb_iatime    = ocfs_get_seconds(inode->i_atime);
 	lvb->lvb_ictime    = ocfs_get_seconds(inode->i_ctime);
 	lvb->lvb_imtime    = ocfs_get_seconds(inode->i_mtime);
+
+	LOG_EXIT();
 }
 
 static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
@@ -964,6 +1065,8 @@
 	ocfs2_lock_res *lockres = &oip->ip_meta_lockres;
 	ocfs2_meta_lvb *lvb     = (ocfs2_meta_lvb *) lockres->l_lksb.lvb;
 
+	LOG_ENTRY();
+
 	/* We're safe here without the lockres lock... */
 	spin_lock(&oip->ip_lock);
 	oip->ip_clusters = lvb->lvb_iclusters;
@@ -978,6 +1081,8 @@
 	OCFS_SET_INODE_TIME(inode, i_ctime, lvb->lvb_ictime);
 	OCFS_SET_INODE_TIME(inode, i_mtime, lvb->lvb_imtime);
 	spin_unlock(&oip->ip_lock);
+
+	LOG_EXIT();
 }
 
 static void ocfs2_reset_meta_lvb_values(struct inode *inode)
@@ -986,6 +1091,8 @@
 	ocfs2_meta_lvb *lvb = (ocfs2_meta_lvb *) lockres->l_lksb.lvb;
 	u32 i_clusters;
 
+	LOG_ENTRY();
+
 	spin_lock(&OCFS_I(inode)->ip_lock);
 	i_clusters = OCFS_I(inode)->ip_clusters;
 	spin_unlock(&OCFS_I(inode)->ip_lock);
@@ -994,6 +1101,8 @@
 	if (lockres->l_level == LKM_EXMODE)
 		lvb->lvb_trunc_clusters = i_clusters;
 	spin_unlock(&lockres->l_lock);
+
+	LOG_EXIT();
 }
 
 static void __ocfs2_lvb_on_downconvert(ocfs2_lock_res *lockres,
@@ -1001,6 +1110,8 @@
 {
 	ocfs2_lvb *lvb = (ocfs2_lvb *) lockres->l_lksb.lvb;
 
+	LOG_ENTRY();
+
 	if (lockres->l_level == LKM_EXMODE) {
 		lvb->lvb_seq++;
 		/* Overflow? */
@@ -1013,6 +1124,8 @@
 		if (lvb->lvb_seq)
 			lockres->l_local_seq++;
 	}
+
+	LOG_EXIT();
 }
 
 /* Determine whether a lock resource needs to be refreshed, and
@@ -1059,6 +1172,8 @@
 static inline void ocfs2_complete_lock_res_refresh(ocfs2_lock_res *lockres,
 						   int status)
 {
+	LOG_ENTRY();
+	
 	spin_lock(&lockres->l_lock);
 	lockres->l_flags &= ~OCFS2_LOCK_REFRESHING;
 	if (!status)
@@ -1066,6 +1181,8 @@
 	spin_unlock(&lockres->l_lock);
 
 	wake_up(&lockres->l_event);
+	
+	LOG_EXIT();
 }
 
 /* may or may not return a bh if it went to disk. */
@@ -1077,6 +1194,8 @@
 	ocfs2_lock_res *lockres;
 	ocfs2_dinode *fe;
 
+	LOG_ENTRY();
+
 	lockres = &OCFS_I(inode)->ip_meta_lockres;
 
 	status = ocfs2_should_refresh_lock_res(lockres);
@@ -1130,6 +1249,7 @@
 
 	ocfs2_complete_lock_res_refresh(lockres, 0);
 bail:
+	LOG_EXIT_STATUS(status);
 	return status;
 }
 
@@ -1220,6 +1340,8 @@
 	int level = ex ? LKM_EXMODE : LKM_PRMODE;
 	ocfs2_lock_res *lockres = &OCFS_I(inode)->ip_meta_lockres;
 
+	LOG_ENTRY();
+
 #ifdef OCFS2_VERBOSE_LOCKING_TRACE
 	printk("ocfs2: (%u) inode %llu drop %s META lock\n",
 	       OCFS_I(inode)->ip_blkno, current->pid,
@@ -1227,6 +1349,7 @@
 #endif
 
 	ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
+	LOG_EXIT();
 }
 
 int ocfs2_super_lock(ocfs_super *osb,
@@ -1328,6 +1451,7 @@
 {
 	int wait_on_vote_task = 0;
 
+	LOG_ENTRY();
 	ocfs2_drop_super_lock(osb);
 
 	/* needs to be able to deal with the dlm being in many
@@ -1345,12 +1469,15 @@
 
 	ocfs2_lock_res_free(&osb->super_lockres);
 	dlm_unregister_domain(osb->dlm);
+	LOG_EXIT();
 }
 
 static void ocfs2_unlock_ast_func(void *opaque, dlm_status status)
 {
 	ocfs2_lock_res *lockres = opaque;
 
+	LOG_ENTRY();
+
 	if (status != DLM_NORMAL)
 		LOG_ERROR_ARGS("Dlm returns status %d\n", status);
 
@@ -1370,6 +1497,8 @@
 	spin_unlock(&lockres->l_lock);
 
 	wake_up(&lockres->l_event);
+	
+	LOG_EXIT();
 }
 
 /* BEWARE: called with lockres lock, and always drops it. */
@@ -1426,12 +1555,14 @@
 	int status;
 	ocfs2_lock_res *lockres;
 
+	LOG_ENTRY();
 	lockres = &osb->super_lockres;
 
 	spin_lock(&lockres->l_lock);
 	status = __ocfs2_drop_lock(osb, lockres);
 	if (status < 0)
 		LOG_ERROR_STATUS(status);
+	LOG_EXIT_STATUS(status);
 }
 
 int ocfs2_drop_inode_locks(struct inode *inode)
@@ -1439,6 +1570,7 @@
 	int status, err;
 	ocfs2_lock_res *lockres;
 
+	LOG_ENTRY();
 	lockres = &OCFS_I(inode)->ip_data_lockres;
 	spin_lock(&lockres->l_lock);
 	err = __ocfs2_drop_lock(OCFS2_SB(inode->i_sb), lockres);
@@ -1464,6 +1596,7 @@
 	if (err < 0 && !status)
 		status = err;
 
+	LOG_EXIT_STATUS(status);
 	return status;
 }
 
@@ -1488,8 +1621,14 @@
 				    int lvb)
 {
 	int status, flags = LKM_CONVERT;
+
+	LOG_ENTRY();
+
 	OCFS_ASSERT(lockres->l_blocking > LKM_NLMODE);
-	OCFS_ASSERT(lockres->l_level > new_level);
+	if (lockres->l_level <= new_level) {
+		LOG_ERROR_ARGS("lockres->l_level (%u) <= new_level (%u)\n", lockres->l_level, new_level);
+		BUG();
+	}
 
 	lockres->l_action = OCFS2_AST_DOWNCONVERT;
 	lockres->l_requested = new_level;
@@ -1515,6 +1654,7 @@
 	}
 	status = 0;
 bail:
+	LOG_EXIT_STATUS(status);
 	return status;
 }
 
@@ -1524,6 +1664,8 @@
 {
 	int status;
 
+	LOG_ENTRY();
+
 	/* were we in a convert when we got the bast fire? */
 	OCFS_ASSERT(lockres->l_action == OCFS2_AST_CONVERT ||
 		    lockres->l_action == OCFS2_AST_DOWNCONVERT);
@@ -1553,6 +1695,7 @@
 		ocfs2_recover_from_dlm_error(lockres, 0);
 	}
 
+	LOG_EXIT_STATUS(status);
 	return status;
 }
 
@@ -1562,6 +1705,8 @@
 {
 	int ret;
 
+	LOG_ENTRY();
+
 	OCFS_ASSERT(new_level == LKM_NLMODE || new_level == LKM_PRMODE);
 	if (new_level == LKM_PRMODE)
 		ret = !lockres->l_ex_holders && 
@@ -1570,6 +1715,7 @@
 		ret = !lockres->l_ro_holders && !lockres->l_ex_holders &&
 			ocfs_inode_fully_checkpointed(inode);
 
+	LOG_EXIT_STATUS(ret);
 	return ret;
 }
 
@@ -1578,14 +1724,22 @@
 {
 	int new_level;
 	int set_lvb = 0;
-	ocfs2_lock_res *lockres = &OCFS_I(inode)->ip_meta_lockres;
+	int ret = 0;
+	ocfs2_lock_res *lockres;
 
+	LOG_ENTRY();
+
+       	lockres = &OCFS_I(inode)->ip_meta_lockres;
+
 	spin_lock(&lockres->l_lock);
 	if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) {
 		spin_unlock(&lockres->l_lock);
-		return 0;
+		ret = 0;
+		goto leave;
 	}
 
+	LOG_TRACE_ARGS("l_level=%d, l_blocking=%d\n", lockres->l_level, lockres->l_blocking);
+
 	OCFS_ASSERT(lockres->l_level == LKM_EXMODE || 
 		    lockres->l_level == LKM_PRMODE);
 
@@ -1596,31 +1750,40 @@
 			 * then just drop the spinlock and requeue ourselves
 			 * to check again later. */
 			spin_unlock(&lockres->l_lock);
-			return 0;
+			ret = 0;
+			goto leave;
 		}
 
-		return __ocfs2_cancel_convert(OCFS2_SB(inode->i_sb),
+		ret = __ocfs2_cancel_convert(OCFS2_SB(inode->i_sb),
 					      lockres);
+		goto leave;
 	}
 
 	new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
+	
+	LOG_TRACE_ARGS("l_level=%d, l_blocking=%d, new_level=%d\n", lockres->l_level, lockres->l_blocking, new_level);
+
 	if (ocfs2_can_downconvert_meta_lock(inode, lockres, new_level)) {
 		if (lockres->l_level == LKM_EXMODE) {
 			__ocfs2_stuff_meta_lvb(inode);
 			set_lvb = 1;
 		}
 		__ocfs2_lvb_on_downconvert(lockres, new_level);
-		return __ocfs2_downconvert_lock(OCFS2_SB(inode->i_sb),
+		LOG_TRACE_ARGS("calling __ocfs2_downconvert_lock with l_level=%d, l_blocking=%d, new_level=%d\n", lockres->l_level, lockres->l_blocking, new_level);
+		ret = __ocfs2_downconvert_lock(OCFS2_SB(inode->i_sb),
 						lockres, new_level,
 						set_lvb);
+		goto leave;
 	}
 	if (!ocfs_inode_fully_checkpointed(inode))
 		ocfs_start_checkpoint(OCFS2_SB(inode->i_sb));
 
 	*requeue++;
 	spin_unlock(&lockres->l_lock);
-
-	return 0;
+	ret = 0;
+leave:
+	LOG_EXIT_STATUS(ret);
+	return ret;
 }
 
 static int ocfs2_generic_unblock_lock(ocfs_super *osb,
@@ -1630,12 +1793,16 @@
 {
 	int blocking;
 	int new_level;
+	int ret = 0;
 
+	LOG_ENTRY();
+
 	spin_lock(&lockres->l_lock);
 	if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) {
 		spin_unlock(&lockres->l_lock);
 		*requeue = 0;
-		return 0;
+		ret = 0;
+		goto leave;
 	}
 
 	OCFS_ASSERT(!(lockres->l_flags & OCFS2_LOCK_BUSY));
@@ -1648,10 +1815,12 @@
 			 * then just drop the spinlock and requeue ourselves
 			 * to check again later. */
 			spin_unlock(&lockres->l_lock);
-			return 0;
+			ret = 0;
+			goto leave;
 		}
 
-		return __ocfs2_cancel_convert(osb, lockres);
+		ret = __ocfs2_cancel_convert(osb, lockres);
+		goto leave;
 	}
 
 	/* if we're blocking an exclusive and we have *any* holders,
@@ -1660,7 +1829,8 @@
 	    && (lockres->l_ex_holders || lockres->l_ro_holders)) {
 		spin_unlock(&lockres->l_lock);
 		*requeue = 1;
-		return 0;
+		ret = 0;
+		goto leave;
 	}
 
 	/* If it's a PR we're blocking, then only
@@ -1669,7 +1839,8 @@
 	    lockres->l_ex_holders) {
 		spin_unlock(&lockres->l_lock);
 		*requeue = 1;
-		return 0;
+		ret = 0;
+		goto leave;
 	}
 
 	/* If we get here, then we know that there are no more
@@ -1697,26 +1868,39 @@
 downconvert:
 	*requeue = 0;
 	new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
-	return __ocfs2_downconvert_lock(osb, lockres, new_level, 0);
+	
+	ret = __ocfs2_downconvert_lock(osb, lockres, new_level, 0);
+leave:
+	LOG_EXIT_STATUS(ret);
+	return ret;
 }
 
 static void ocfs2_data_convert_worker(ocfs2_lock_res *lockres,
 				      int blocking)
 {
-	struct inode *inode = ocfs2_lock_res_inode(lockres);
+	struct inode *inode;
+	
+	LOG_ENTRY();
+       	inode = ocfs2_lock_res_inode(lockres);
 
         sync_mapping_buffers(inode->i_mapping);
         if (blocking == LKM_EXMODE)
                 ocfs_truncate_inode_pages(inode, 0);
+	LOG_EXIT();
 }
 
 int ocfs2_unblock_data(ocfs2_lock_res *lockres,
 			       int *requeue)
 {
 	int status;
-	struct inode *inode = ocfs2_lock_res_inode(lockres);
-	ocfs_super *osb = OCFS2_SB(inode->i_sb);
+	struct inode *inode;
+	ocfs_super *osb;
 
+	LOG_ENTRY();
+
+	inode = ocfs2_lock_res_inode(lockres);
+	osb = OCFS2_SB(inode->i_sb);
+
 	status = ocfs2_generic_unblock_lock(osb,
 					    lockres,
 					    requeue,
@@ -1734,6 +1918,7 @@
 	iput(inode);
 	*requeue = 0;
 
+	LOG_EXIT_STATUS(status);
 	return status;
 }
 
@@ -1741,8 +1926,12 @@
 		       int *requeue)
 {
 	int status;
-	struct inode *inode = ocfs2_lock_res_inode(lockres);
+	struct inode *inode;
 
+	LOG_ENTRY();
+
+       	inode = ocfs2_lock_res_inode(lockres);
+
 	status = ocfs2_do_unblock_meta(inode, requeue);
 	if (status < 0)
 		LOG_ERROR_STATUS(status);
@@ -1755,6 +1944,7 @@
 	iput(inode);
 	*requeue = 0;
 
+	LOG_EXIT_STATUS(status);
 	return status;
 }
 
@@ -1762,8 +1952,12 @@
 			       int *requeue)
 {
 	int status;
-	ocfs_super *osb = ocfs2_lock_res_super(lockres);
+	ocfs_super *osb;
+       
+	LOG_ENTRY();
 
+	osb = ocfs2_lock_res_super(lockres);
+
 	status = ocfs2_generic_unblock_lock(osb,
 					    lockres,
 					    requeue,
@@ -1771,6 +1965,7 @@
 	if (status < 0)
 		LOG_ERROR_STATUS(status);
 
+	LOG_EXIT_STATUS(status);
 	return status;
 }
 
@@ -1780,6 +1975,8 @@
 	int status;
 	int requeue = 0;
 
+	LOG_ENTRY();
+
 	OCFS_ASSERT(lockres);
 	OCFS_ASSERT(lockres->l_ops);
 	OCFS_ASSERT(lockres->l_ops->unblock);
@@ -1790,11 +1987,15 @@
 
 	if (requeue)
 		ocfs2_schedule_blocked_lock(osb, lockres);
+
+	LOG_EXIT();
 }
 
 static void ocfs2_schedule_blocked_lock(ocfs_super *osb,
 					ocfs2_lock_res *lockres)
 {
+	LOG_ENTRY();
+
 	spin_lock(&osb->vote_task_lock);
 	if (list_empty(&lockres->l_blocked_list)) {
 		list_add_tail(&lockres->l_blocked_list,
@@ -1802,12 +2003,15 @@
 		osb->blocked_lock_count++;
 	}
 	spin_unlock(&osb->vote_task_lock);
+
+	LOG_EXIT();
 }
 
 /* needed for inodes as we have to take a reference on them.. */
 static void ocfs2_schedule_blocked_inode_lock(struct inode *inode,
 					      ocfs2_lock_res *lockres)
 {
+	LOG_ENTRY();
 	if (!igrab(inode)) {
 		LOG_ERROR_ARGS("Inode %llu asked to be scheduled during "
 			       "clear_inode!\n", OCFS_I(inode)->ip_blkno);
@@ -1815,4 +2019,5 @@
 	}
 
 	ocfs2_schedule_blocked_lock(OCFS2_SB(inode->i_sb), lockres);
+	LOG_EXIT();
 }

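Nearly all of the dlmglue.c hunks above do one of three things: bracket a function with LOG_ENTRY()/LOG_EXIT()/LOG_EXIT_STATUS(), funnel early returns through a single leave: label so the exit trace reports the value actually returned, or (in __ocfs2_downconvert_lock()) replace a bare OCFS_ASSERT() with a check that logs both lock levels before calling BUG(). A sketch of that combined shape, with stand-in macros and a placeholder function, since the real LOG_* definitions live elsewhere in the ocfs2 tree and are not shown in this diff:

    #include <linux/kernel.h>   /* printk; BUG() comes in via the arch
                                 * headers on 2.4/2.6, adjust as needed */

    /* stand-ins for LOG_ENTRY/LOG_EXIT_STATUS/LOG_ERROR_ARGS */
    #define TRACE_ENTRY()            printk("enter %s\n", __FUNCTION__)
    #define TRACE_EXIT_STATUS(st)    printk("exit  %s = %d\n", __FUNCTION__, (st))
    #define TRACE_ERROR(fmt, arg...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ##arg)

    /* placeholder function, not ocfs2 code: one exit label, and an assert
     * that records its operands before bringing the machine down */
    static int demo_downconvert(int cur_level, int new_level)
    {
            int ret = 0;

            TRACE_ENTRY();

            if (cur_level <= new_level) {
                    TRACE_ERROR("cur_level (%d) <= new_level (%d)\n",
                                cur_level, new_level);
                    BUG();
            }

            /* ... the real downconvert work would go here ... */

            TRACE_EXIT_STATUS(ret);
            return ret;
    }

The single-exit rewrite is what lets LOG_EXIT_STATUS() in ocfs2_do_unblock_meta() and ocfs2_generic_unblock_lock() report the same status the caller sees, instead of being skipped by the early returns the old code used.
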
Modified: trunk/src/vote.c
===================================================================
--- trunk/src/vote.c	2004-12-09 23:19:25 UTC (rev 1701)
+++ trunk/src/vote.c	2004-12-14 05:49:17 UTC (rev 1702)
@@ -299,6 +299,8 @@
 	ocfs2_lock_res *lockres;
 	ocfs2_vote_work *work;
 
+	LOG_ENTRY();
+
 	spin_lock(&osb->vote_task_lock);
 	processed = osb->blocked_lock_count;
 	while (processed) {
@@ -332,6 +334,8 @@
 		spin_lock(&osb->vote_task_lock);
 	}
 	spin_unlock(&osb->vote_task_lock);
+
+	LOG_EXIT();
 }
 
 static inline int ocfs2_vote_thread_has_work(ocfs_super *osb)


