[Ocfs2-commits] khackel commits r2811 - branches/ocfs2-1.2-cert/patches

svn-commits at oss.oracle.com
Mon Apr 3 02:45:59 CDT 2006


Author: khackel
Signed-off-by: jlbec
Date: 2006-04-03 02:45:59 -0500 (Mon, 03 Apr 2006)
New Revision: 2811

Added:
   branches/ocfs2-1.2-cert/patches/tune-down-msgs.patch
Log:
add tune-down-msgs.patch, but do NOT add to series
Signed-off-by: jlbec
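
The patch below downgrades a large number of mlog() calls from the ML_NOTICE and
ML_ERROR masks to mask 0, so those messages stop printing by default and only
appear when the corresponding debug logging is switched on. As a rough,
self-contained illustration of the idea only (this is not the actual
fs/ocfs2/cluster/masklog implementation; my_mlog(), allowed_mask, debug_enabled
and the MY_ML_* bits are hypothetical stand-ins):

    #include <stdio.h>

    /* Hypothetical stand-ins for masklog bits such as ML_ERROR and ML_NOTICE. */
    #define MY_ML_ERROR   0x1ULL
    #define MY_ML_NOTICE  0x2ULL

    /* Bits the administrator has switched on; mask-0 (debug) messages are
     * suppressed unless debugging is explicitly enabled. */
    static unsigned long long allowed_mask = MY_ML_ERROR | MY_ML_NOTICE;
    static int debug_enabled;

    /* A mask-gated printf: emit the message only if its mask bit is enabled,
     * or if it is a mask-0 message and debug logging is turned on. */
    #define my_mlog(mask, fmt, ...)                                     \
            do {                                                        \
                    if (((mask) & allowed_mask) ||                      \
                        ((mask) == 0 && debug_enabled))                 \
                            printf(fmt, ##__VA_ARGS__);                 \
            } while (0)

    int main(void)
    {
            /* Before the patch: printed whenever ML_NOTICE is enabled. */
            my_mlog(MY_ML_NOTICE, "node %d up while restarting\n", 3);

            /* After the patch: silent unless debug_enabled is set. */
            my_mlog(0, "node %d up while restarting\n", 3);
            return 0;
    }

With the default masks this sketch prints only the first message; the calls the
patch changes to mask 0 become opt-in debug output in the same way.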

Added: branches/ocfs2-1.2-cert/patches/tune-down-msgs.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/tune-down-msgs.patch	2006-04-03 06:27:12 UTC (rev 2810)
+++ branches/ocfs2-1.2-cert/patches/tune-down-msgs.patch	2006-04-03 07:45:59 UTC (rev 2811)
@@ -0,0 +1,677 @@
+Index: cert2/fs/ocfs2/dlm/dlmlock.c
+===================================================================
+--- cert2.orig/fs/ocfs2/dlm/dlmlock.c	2006-04-02 23:27:17.392139000 -0700
++++ cert2/fs/ocfs2/dlm/dlmlock.c	2006-04-03 00:26:03.376941000 -0700
+@@ -232,7 +232,7 @@
+ 			/* recovery lock was mastered by dead node.
+ 			 * we need to have calc_usage shoot down this
+ 			 * lockres and completely remaster it. */
+-			mlog(ML_NOTICE, "%s: recovery lock was owned by "
++			mlog(0, "%s: recovery lock was owned by "
+ 			     "dead node %u, remaster it now.\n",
+ 			     dlm->name, res->owner);
+ 		} else if (status != DLM_NOTQUEUED) {
+Index: cert2/fs/ocfs2/dlm/dlmmaster.c
+===================================================================
+--- cert2.orig/fs/ocfs2/dlm/dlmmaster.c	2006-04-02 23:27:17.730079000 -0700
++++ cert2/fs/ocfs2/dlm/dlmmaster.c	2006-04-03 00:32:55.480865000 -0700
+@@ -834,7 +834,7 @@
+ 		 * but they might own this lockres.  wait on them. */
+ 		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
+ 		if (bit < O2NM_MAX_NODES) {
+-			mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to"
++			mlog(0, "%s:%.*s: at least one node (%d) to"
+ 			     "recover before lock mastery can begin\n",
+ 			     dlm->name, namelen, (char *)lockid, bit);
+ 			wait_on_recovery = 1;
+@@ -862,7 +862,7 @@
+ 		 * dlm spinlock would be detectable be a change on the mle,
+ 		 * so we only need to clear out the recovery map once. */
+ 		if (dlm_is_recovery_lock(lockid, namelen)) {
+-			mlog(ML_NOTICE, "%s: recovery map is not empty, but "
++			mlog(0, "%s: recovery map is not empty, but "
+ 			     "must master $RECOVERY lock now\n", dlm->name);
+ 			if (!dlm_pre_master_reco_lockres(dlm, res))
+ 				wait_on_recovery = 0;
+@@ -881,7 +881,7 @@
+ 		spin_lock(&dlm->spinlock);
+ 		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
+ 		if (bit < O2NM_MAX_NODES) {
+-			mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to"
++			mlog(0, "%s:%.*s: at least one node (%d) to"
+ 			     "recover before lock mastery can begin\n",
+ 			     dlm->name, namelen, (char *)lockid, bit);
+ 			wait_on_recovery = 1;
+@@ -922,7 +922,7 @@
+ 	ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
+ 	if (ret < 0) {
+ 		wait_on_recovery = 1;
+-		mlog(ML_NOTICE, "%s:%.*s: node map changed, redo the "
++		mlog(0, "%s:%.*s: node map changed, redo the "
+ 		     "master request now, blocked=%d\n",
+ 		     dlm->name, res->lockname.len,
+ 		     res->lockname.name, blocked);
+@@ -991,7 +991,7 @@
+ 			ret = dlm_do_master_request(mle, res->owner);
+ 			if (ret < 0) {
+ 				/* give recovery a chance to run */
+-				mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
++				mlog(0, "link to %u went down?: %d\n", res->owner, ret);
+ 				msleep(500);
+ 				goto recheck;
+ 			}
+@@ -1195,14 +1195,14 @@
+ 			/* a node came up.  clear any old vote from
+ 			 * the response map and set it in the vote map
+ 			 * then restart the mastery. */
+-			mlog(ML_NOTICE, "node %d up while restarting\n", node);
++			mlog(0, "node %d up while restarting\n", node);
+ 
+ 			/* redo the master request, but only for the new node */
+ 			mlog(0, "sending request to new node\n");
+ 			clear_bit(node, mle->response_map);
+ 			set_bit(node, mle->vote_map);
+ 		} else {
+-			mlog(ML_ERROR, "node down! %d\n", node);
++			mlog(0, "node down! %d\n", node);
+ 			if (blocked) {
+ 				int lowest = find_next_bit(mle->maybe_map,
+ 						       O2NM_MAX_NODES, 0);
+@@ -1211,14 +1211,14 @@
+ 				clear_bit(node, mle->maybe_map);
+ 
+ 			       	if (node == lowest) {
+-					mlog(ML_ERROR, "expected master %u died"
++					mlog(0, "expected master %u died"
+ 					    " while this node was blocked "
+ 					    "waiting on it!\n", node);
+ 					lowest = find_next_bit(mle->maybe_map,
+ 						       	O2NM_MAX_NODES,
+ 						       	lowest+1);
+ 					if (lowest < O2NM_MAX_NODES) {
+-						mlog(ML_NOTICE, "%s:%.*s:still "
++						mlog(0, "%s:%.*s:still "
+ 						     "blocked. waiting on %u "
+ 						     "now\n", dlm->name,
+ 						     res->lockname.len,
+@@ -1233,7 +1233,7 @@
+ 						 * dlm_do_local_recovery_cleanup
+ 						 * has already run, so the mle 
+ 						 * refcount is ok */
+-						mlog(ML_NOTICE, "%s:%.*s: no "
++						mlog(0, "%s:%.*s: no "
+ 						     "longer blocking. try to "
+ 						     "master this here\n",
+ 						     dlm->name, 
+@@ -1385,7 +1385,7 @@
+ 		}
+ 		/* all other errors should be network errors,
+ 		 * and likely indicate node death */
+-		mlog(ML_ERROR, "link to %d went down!\n", to);
++		mlog(0, "link to %d went down!\n", to);
+ 		goto out;
+ 	}
+ 
+@@ -1694,18 +1694,18 @@
+ 		tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
+ 					    &assert, sizeof(assert), to, &r);
+ 		if (tmpret < 0) {
+-			mlog(ML_ERROR, "assert_master returned %d!\n", tmpret);
++			mlog(0, "assert_master returned %d!\n", tmpret);
+ 			if (!dlm_is_host_down(tmpret)) {
+ 				mlog(ML_ERROR, "unhandled error!\n");
+ 				BUG();
+ 			}
+ 			/* a node died.  finish out the rest of the nodes. */
+-			mlog(ML_ERROR, "link to %d went down!\n", to);
++			mlog(0, "link to %d went down!\n", to);
+ 			/* any nonzero status return will do */
+ 			ret = tmpret;
+ 		} else if (r < 0) {
+ 			/* ok, something horribly messed.  kill thyself. */
+-			mlog(ML_ERROR,"during assert master of %.*s to %u, "
++			mlog(0,"during assert master of %.*s to %u, "
+ 			     "got %d.\n", namelen, lockname, to, r);
+ 			spin_lock(&dlm->spinlock);
+ 			spin_lock(&dlm->master_lock);
+@@ -1782,7 +1782,7 @@
+ 		if (bit >= O2NM_MAX_NODES) {
+ 			/* not necessarily an error, though less likely.
+ 			 * could be master just re-asserting. */
+-			mlog(ML_NOTICE, "no bits set in the maybe_map, but %u "
++			mlog(0, "no bits set in the maybe_map, but %u "
+ 			     "is asserting! (%.*s)\n", assert->node_idx,
+ 			     namelen, name);
+ 		} else if (bit != assert->node_idx) {
+@@ -1794,7 +1794,7 @@
+ 				 * number winning the mastery will respond
+ 				 * YES to mastery requests, but this node
+ 				 * had no way of knowing.  let it pass. */
+-				mlog(ML_NOTICE, "%u is the lowest node, "
++				mlog(0, "%u is the lowest node, "
+ 				     "%u is asserting. (%.*s)  %u must "
+ 				     "have begun after %u won.\n", bit,
+ 				     assert->node_idx, namelen, name, bit,
+@@ -1803,12 +1803,12 @@
+ 		}
+ 		if (mle->type == DLM_MLE_MIGRATION) {
+ 			if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
+-				mlog(ML_NOTICE, "%s:%.*s: got cleanup assert"
++				mlog(0, "%s:%.*s: got cleanup assert"
+ 				     " from %u for migration\n",
+ 				     dlm->name, namelen, name,
+ 				     assert->node_idx);
+ 			} else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
+-				mlog(ML_NOTICE, "%s:%.*s: got unrelated assert"
++				mlog(0, "%s:%.*s: got unrelated assert"
+ 				     " from %u for migration, ignoring\n",
+ 				     dlm->name, namelen, name,
+ 				     assert->node_idx);
+@@ -2138,7 +2138,7 @@
+ 			/* check to see if this master is in the recovery map */
+ 			spin_lock(&dlm->spinlock);
+ 			if (test_bit(master, dlm->recovery_map)) {
+-				mlog(ML_NOTICE, "%s: node %u has not seen "
++				mlog(0, "%s: node %u has not seen "
+ 				     "node %u go down yet, and thinks the "
+ 				     "dead node is mastering the recovery "
+ 				     "lock.  must wait.\n", dlm->name,
+@@ -2293,7 +2293,7 @@
+ 	 * if we fail after this we need to re-dirty the lockres
+ 	 */
+ 	if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
+-		mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
++		mlog(0, "tried to migrate %.*s to %u, but "
+ 		     "the target went down.\n", res->lockname.len,
+ 		     res->lockname.name, target);
+ 		spin_lock(&res->spinlock);
+@@ -2383,7 +2383,7 @@
+ 			/* avoid hang during shutdown when migrating lockres 
+ 			 * to a node which also goes down */
+ 			if (dlm_is_node_dead(dlm, target)) {
+-				mlog(ML_NOTICE, "%s:%.*s: expected migration "
++				mlog(0, "%s:%.*s: expected migration "
+ 				     "target %u is no longer up, restarting\n",
+ 				     dlm->name, res->lockname.len,
+ 				     res->lockname.name, target);
+@@ -2523,7 +2523,7 @@
+ 	/* did the target go down or die? */
+ 	spin_lock(&dlm->spinlock);
+ 	if (!test_bit(target, dlm->domain_map)) {
+-		mlog(ML_ERROR, "aha. migration target %u just went down\n",
++		mlog(0, "aha. migration target %u just went down\n",
+ 		     target);
+ 		ret = -EHOSTDOWN;
+ 	}
+@@ -2861,7 +2861,7 @@
+ 				 * may result in the mle being unlinked and
+ 				 * freed, but there may still be a process
+ 				 * waiting in the dlmlock path which is fine. */
+-				mlog(ML_ERROR, "node %u was expected master\n",
++				mlog(0, "node %u was expected master\n",
+ 				     dead_node);
+ 				atomic_set(&mle->woken, 1);
+ 				spin_unlock(&mle->spinlock);
+@@ -2901,7 +2901,7 @@
+ 		spin_unlock(&mle->spinlock);
+ 		wake_up(&mle->wq);
+ 
+-		mlog(ML_NOTICE, "%s: node %u died during migration from "
++		mlog(0, "%s: node %u died during migration from "
+ 		     "%u to %u!\n", dlm->name, dead_node,
+ 		     mle->master, mle->new_master);
+ 		/* if there is a lockres associated with this
+Index: cert2/fs/ocfs2/dlm/dlmrecovery.c
+===================================================================
+--- cert2.orig/fs/ocfs2/dlm/dlmrecovery.c	2006-04-02 23:27:17.789020000 -0700
++++ cert2/fs/ocfs2/dlm/dlmrecovery.c	2006-04-03 00:42:08.148491000 -0700
+@@ -122,7 +122,7 @@
+ {
+ 	assert_spin_locked(&dlm->spinlock);
+ 	if (dlm->reco.dead_node != dead_node)
+-		mlog(ML_NOTICE, "%s: changing dead_node from %u to %u\n",
++		mlog(0, "%s: changing dead_node from %u to %u\n",
+ 		     dlm->name, dlm->reco.dead_node, dead_node);
+ 	dlm->reco.dead_node = dead_node;
+ }
+@@ -131,7 +131,7 @@
+ 				       u8 master)
+ {
+ 	assert_spin_locked(&dlm->spinlock);
+-	mlog(ML_NOTICE, "%s: changing new_master from %u to %u\n",
++	mlog(0, "%s: changing new_master from %u to %u\n",
+ 	     dlm->name, dlm->reco.new_master, master);
+ 	dlm->reco.new_master = master;
+ }
+@@ -253,7 +253,7 @@
+ 	struct dlm_reco_node_data *ndata;
+ 	struct dlm_lock_resource *res;
+ 	
+-	mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, "
++	mlog(0, "%s(%d): recovery info, state=%s, "
+ 		     "dead=%u, master=%u\n", dlm->name,
+ 		     dlm->dlm_reco_thread_task->pid, 
+ 		     dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
+@@ -287,11 +287,11 @@
+ 				st = "bad";
+ 				break;
+ 		}
+-		mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n", 
++		mlog(0, "%s: reco state, node %u, state=%s\n",
+ 		     dlm->name, ndata->node_num, st);
+ 	}
+ 	list_for_each_entry(res, &dlm->reco.resources, recovering) {
+-		mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
++		mlog(0, "%s: lockres %.*s on recovering list\n",
+ 		     dlm->name, res->lockname.len, res->lockname.name);
+ 	}
+ }
+@@ -362,13 +362,13 @@
+ int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
+ {
+ 	if (timeout) {
+-		mlog(ML_NOTICE, "%s: waiting %dms for notification of "
++		mlog(0, "%s: waiting %dms for notification of "
+ 		     "death of node %u\n", dlm->name, timeout, node);
+ 		wait_event_timeout(dlm->dlm_reco_thread_wq,
+ 			   dlm_is_node_dead(dlm, node),
+ 			   msecs_to_jiffies(timeout));
+ 	} else {
+-		mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
++		mlog(0, "%s: waiting indefinitely for notification "
+ 		     "of death of node %u\n", dlm->name, node);
+ 		wait_event(dlm->dlm_reco_thread_wq,
+ 			   dlm_is_node_dead(dlm, node));
+@@ -380,13 +380,13 @@
+ int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
+ {
+ 	if (timeout) {
+-		mlog(ML_NOTICE, "%s: waiting %dms for notification of "
++		mlog(0, "%s: waiting %dms for notification of "
+ 		     "recovery of node %u\n", dlm->name, timeout, node);
+ 		wait_event_timeout(dlm->dlm_reco_thread_wq,
+ 			   dlm_is_node_recovered(dlm, node),
+ 			   msecs_to_jiffies(timeout));
+ 	} else {
+-		mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
++		mlog(0, "%s: waiting indefinitely for notification "
+ 		     "of recovery of node %u\n", dlm->name, node);
+ 		wait_event(dlm->dlm_reco_thread_wq,
+ 			   dlm_is_node_recovered(dlm, node));
+@@ -414,7 +414,7 @@
+ void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
+ {
+ 	if (dlm_in_recovery(dlm)) {
+-		mlog(ML_NOTICE, "%s: reco thread %d in recovery: "
++		mlog(0, "%s: reco thread %d in recovery: "
+ 		     "state=%d, master=%u, dead=%u\n",
+ 		     dlm->name, dlm->dlm_reco_thread_task->pid,
+ 		     dlm->reco.state, dlm->reco.new_master,
+@@ -427,7 +427,7 @@
+ 				       !dlm_in_recovery(dlm), 
+ 				       msecs_to_jiffies(5000)))
+ 			break;
+-		mlog(ML_NOTICE, "%s: reco thread %d still in recovery: "
++		mlog(0, "%s: reco thread %d still in recovery: "
+ 		     "state=%d, master=%u, dead=%u\n",
+ 		     dlm->name, dlm->dlm_reco_thread_task->pid,
+ 		     dlm->reco.state, dlm->reco.new_master,
+@@ -493,7 +493,7 @@
+ 		/* return to main thread loop and sleep. */
+ 		return 0;
+ 	}
+-	mlog(ML_NOTICE, "%s(%d):recovery thread found node %u in the recovery map!\n",
++	mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
+ 	     dlm->name, dlm->dlm_reco_thread_task->pid,
+ 	     dlm->reco.dead_node);
+ 	spin_unlock(&dlm->spinlock);
+@@ -517,7 +517,7 @@
+ 		}
+ 		mlog(0, "another node will master this recovery session.\n");
+ 	}
+-	mlog(ML_NOTICE, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n",
++	mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n",
+ 	     dlm->name, dlm->dlm_reco_thread_task->pid, dlm->reco.new_master,
+ 	     dlm->node_num, dlm->reco.dead_node);
+ 
+@@ -530,21 +530,21 @@
+ 	return 0;
+ 
+ master_here:
+-	mlog(ML_NOTICE, "(%d) mastering recovery of %s:%u here(this=%u)!\n",
++	mlog(0, "(%d) mastering recovery of %s:%u here(this=%u)!\n",
+ 	     dlm->dlm_reco_thread_task->pid,
+ 	     dlm->name, dlm->reco.dead_node, dlm->node_num);
+ 
+ 	status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
+ 	if (status < 0) {
+ 		/* we should never hit this anymore */
+-		mlog(ML_ERROR, "error %d remastering locks for node %u, "
++		mlog(0, "error %d remastering locks for node %u, "
+ 		     "retrying.\n", status, dlm->reco.dead_node);
+ 		/* yield a bit to allow any final network messages
+ 		 * to get handled on remaining nodes */
+ 		msleep(100);
+ 	} else {
+ 		/* success!  see if any other nodes need recovery */
+-		mlog(ML_NOTICE, "DONE mastering recovery of %s:%u here(this=%u)!\n",
++		mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
+ 		     dlm->name, dlm->reco.dead_node, dlm->node_num);
+ 		dlm_reset_recovery(dlm);
+ 	}
+@@ -562,7 +562,6 @@
+ 	int all_nodes_done;
+ 	int destroy = 0;
+ 	int pass = 0;
+-	unsigned long long mlg;
+ 
+ 	do {
+ 		/* we have become recovery master.  there is no escaping
+@@ -611,7 +610,7 @@
+ 					     "yes" : "no");
+ 				} else {
+ 					/* -ENOMEM on the other node */
+-					mlog(ML_NOTICE, "%s: node %u returned "
++					mlog(0, "%s: node %u returned "
+ 					     "%d during recovery, retrying "
+ 					     "after a short wait\n",
+ 					     dlm->name, ndata->node_num,
+@@ -628,7 +627,7 @@
+ 				BUG();
+ 				break;
+ 			case DLM_RECO_NODE_DATA_DEAD:
+-				mlog(ML_ERROR, "%s:node %u died after "
++				mlog(0, "%s:node %u died after "
+ 				     "requesting recovery info for node %u\n",
+ 				     dlm->name, ndata->node_num, dead_node);
+ 				/* fine.  don't need this node's info.
+@@ -685,13 +684,7 @@
+ 					break;
+ 				case DLM_RECO_NODE_DATA_RECEIVING:
+ 				case DLM_RECO_NODE_DATA_REQUESTED:
+-					if (pass % 1000)
+-						mlg = ML_ERROR;
+-					else if (pass % 100 == 0)
+-						mlg = ML_NOTICE;
+-					else
+-						mlg = 0;
+-					mlog(mlg, "%s: node %u still in state %s\n",
++					mlog(0, "%s: node %u still in state %s\n",
+ 					     dlm->name, ndata->node_num,
+ 					     ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
+ 					     "receiving" : "requested");
+@@ -728,7 +721,7 @@
+ 			spin_unlock(&dlm->spinlock);
+ 			mlog(0, "should be done with recovery!\n");
+ 
+-			mlog(ML_NOTICE, "finishing recovery of %s at %lu, "
++			mlog(0, "finishing recovery of %s at %lu, "
+ 			     "dead=%u, this=%u, new=%u\n", dlm->name,
+ 			     jiffies, dlm->reco.dead_node,
+ 			     dlm->node_num, dlm->reco.new_master);
+@@ -899,7 +892,7 @@
+ 	reco_master = item->u.ral.reco_master;
+ 	mres = (struct dlm_migratable_lockres *)data;
+ 
+-	mlog(ML_NOTICE, "%s: recovery worker started, dead=%u, master=%u\n",
++	mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
+ 	     dlm->name, dead_node, reco_master);
+ 
+ 	if (dead_node != dlm->reco.dead_node ||
+@@ -907,13 +900,13 @@
+ 		/* worker could have been created before the recovery master
+ 		 * died.  if so, do not continue, but do not error. */
+ 		if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
+-			mlog(ML_NOTICE, "%s: will not send recovery state, "
++			mlog(0, "%s: will not send recovery state, "
+ 			     "recovery master %u died, thread=(dead=%u,mas=%u)"
+ 			     " current=(dead=%u,mas=%u)\n", dlm->name,
+ 			     reco_master, dead_node, reco_master,
+ 			     dlm->reco.dead_node, dlm->reco.new_master);
+ 		} else {
+-			mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
++			mlog(0, "%s: reco state invalid: reco(dead=%u, "
+ 			     "master=%u), request(dead=%u, master=%u)\n",
+ 			     dlm->name, dlm->reco.dead_node,
+ 			     dlm->reco.new_master, dead_node, reco_master);
+@@ -939,7 +932,7 @@
+ 				   	DLM_MRES_RECOVERY);
+ 		if (ret < 0) {
+ 			mlog_errno(ret);
+-			mlog(ML_ERROR, "%s: node %u went down while sending "
++			mlog(0, "%s: node %u went down while sending "
+ 			     "recovery state for dead node %u\n", dlm->name,
+ 			     reco_master, dead_node);
+ 			skip_all_done = 1;
+@@ -956,7 +949,7 @@
+ 		ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
+ 		if (ret < 0) {
+ 			mlog_errno(ret);
+-			mlog(ML_ERROR, "%s: node %u went down while sending "
++			mlog(0, "%s: node %u went down while sending "
+ 			     "recovery all-done for dead node %u\n", dlm->name,
+ 			     reco_master, dead_node);
+ 		}
+@@ -974,7 +967,7 @@
+ 	memset(&done_msg, 0, sizeof(done_msg));
+ 	done_msg.node_idx = dlm->node_num;
+ 	done_msg.dead_node = dead_node;
+-	mlog(ML_NOTICE, "sending DATA DONE message to %u, "
++	mlog(0, "sending DATA DONE message to %u, "
+ 	     "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
+ 	     done_msg.dead_node);
+ 
+@@ -1285,11 +1278,11 @@
+ 	return ret;
+ 
+ error:
+-	mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
++	mlog(0, "%s: dlm_send_mig_lockres_msg returned %d\n",
+ 	     dlm->name, ret);
+ 	if (!dlm_is_host_down(ret))
+ 		BUG();
+-	mlog(ML_NOTICE, "%s: node %u went down while sending %s "
++	mlog(0, "%s: node %u went down while sending %s "
+ 	     "lockres %.*s\n", dlm->name, send_to, 
+ 	     flags & DLM_MRES_RECOVERY ?  "recovery" : "migration",
+ 	     res->lockname.len, res->lockname.name);
+@@ -2237,13 +2230,13 @@
+ 	assert_spin_locked(&dlm->spinlock);
+ 
+ 	if (dlm->reco.new_master == idx) {
+-		mlog(ML_NOTICE, "%s: recovery master %d just died\n",
++		mlog(0, "%s: recovery master %d just died\n",
+ 		     dlm->name, idx);
+ 		if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
+ 			/* finalize1 was reached, so it is safe to clear
+ 			 * the new_master and dead_node.  that recovery
+ 			 * is complete. */
+-			mlog(ML_NOTICE, "%s: dead master %d had reached "
++			mlog(0, "%s: dead master %d had reached "
+ 			     "finalize1 state, clearing\n", dlm->name, idx);
+ 			dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
+ 			__dlm_reset_recovery(dlm);
+@@ -2377,7 +2370,7 @@
+ 	struct dlm_lockstatus lksb;
+ 	int status = -EINVAL;
+ 
+-	mlog(ML_NOTICE, "starting recovery of %s at %lu, dead=%u, this=%u\n",
++	mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
+ 	     dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
+ again:	
+ 	memset(&lksb, 0, sizeof(lksb));
+@@ -2385,17 +2378,17 @@
+ 	ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
+ 		      DLM_RECOVERY_LOCK_NAME, dlm_reco_ast, dlm, dlm_reco_bast);
+ 
+-	mlog(ML_NOTICE, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
++	mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
+ 	     dlm->name, ret, lksb.status);
+ 
+ 	if (ret == DLM_NORMAL) {
+-		mlog(ML_NOTICE, "dlm=%s dlmlock says I got it (this=%u)\n",
++		mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
+ 		     dlm->name, dlm->node_num);
+ 		
+ 		/* got the EX lock.  check to see if another node 
+ 		 * just became the reco master */
+ 		if (dlm_reco_master_ready(dlm)) {
+-			mlog(ML_NOTICE, "%s: got reco EX lock, but %u will "
++			mlog(0, "%s: got reco EX lock, but %u will "
+ 			     "do the recovery\n", dlm->name,
+ 			     dlm->reco.new_master);
+ 			status = -EEXIST;
+@@ -2406,7 +2399,7 @@
+ 			spin_lock(&dlm->spinlock);
+ 			if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
+ 				status = -EINVAL;	
+-				mlog(ML_NOTICE, "%s: got reco EX lock, but "
++				mlog(0, "%s: got reco EX lock, but "
+ 				     "node got recovered already\n", dlm->name);
+ 				if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
+ 					mlog(ML_ERROR, "%s: new master is %u "
+@@ -2421,7 +2414,7 @@
+ 		/* if this node has actually become the recovery master,
+ 		 * set the master and send the messages to begin recovery */
+ 		if (!status) {
+-			mlog(ML_NOTICE, "%s: dead=%u, this=%u, sending "
++			mlog(0, "%s: dead=%u, this=%u, sending "
+ 			     "begin_reco now\n", dlm->name, 
+ 			     dlm->reco.dead_node, dlm->node_num);
+ 			status = dlm_send_begin_reco_message(dlm,
+@@ -2452,7 +2445,7 @@
+ 			mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
+ 		}
+ 	} else if (ret == DLM_NOTQUEUED) {
+-		mlog(ML_NOTICE, "dlm=%s dlmlock says another node got it (this=%u)\n",
++		mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
+ 		     dlm->name, dlm->node_num);
+ 		/* another node is master. wait on
+ 		 * reco.new_master != O2NM_INVALID_NODE_NUM 
+@@ -2461,16 +2454,16 @@
+ 					 dlm_reco_master_ready(dlm),
+ 					 msecs_to_jiffies(1000));
+ 		if (!dlm_reco_master_ready(dlm)) {
+-			mlog(ML_NOTICE, "%s: reco master taking awhile\n",
++			mlog(0, "%s: reco master taking awhile\n",
+ 			     dlm->name);
+ 			goto again;
+ 		}
+ 		/* another node has informed this one that it is reco master */
+-		mlog(ML_NOTICE, "%s: reco master %u is ready to recover %u\n",
++		mlog(0, "%s: reco master %u is ready to recover %u\n",
+ 		     dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
+ 		status = -EEXIST;
+ 	} else if (ret == DLM_RECOVERING) {
+-		mlog(ML_NOTICE, "dlm=%s dlmlock says master node died (this=%u)\n",
++		mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
+ 		     dlm->name, dlm->node_num);
+ 		goto again;
+ 	} else {
+@@ -2504,7 +2497,7 @@
+ 
+ 	mlog_entry("%u\n", dead_node);
+ 
+-	mlog(ML_NOTICE, "%s: dead node is %u\n", dlm->name, dead_node);
++	mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);
+ 
+ 	spin_lock(&dlm->spinlock);
+ 	dlm_node_iter_init(dlm->domain_map, &iter);
+@@ -2548,7 +2541,7 @@
+ 			/* this is now a serious problem, possibly ENOMEM 
+ 			 * in the network stack.  must retry */
+ 			mlog_errno(ret);
+-			mlog(ML_ERROR, "begin reco of dlm %s to node %u "
++			mlog(0, "begin reco of dlm %s to node %u "
+ 			    " returned %d\n", dlm->name, nodenum, ret);
+ 			res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
+ 						 DLM_RECOVERY_LOCK_NAME_LEN);
+@@ -2556,14 +2549,14 @@
+ 				dlm_print_one_lock_resource(res);
+ 				dlm_lockres_put(res);
+ 			} else {
+-				mlog(ML_ERROR, "recovery lock not found\n");
++				mlog(0, "recovery lock not found\n");
+ 			}
+ 			/* sleep for a bit in hopes that we can avoid 
+ 			 * another ENOMEM */
+ 			msleep(100);
+ 			goto retry;
+ 		} else if (ret == EAGAIN) {
+-			mlog(ML_NOTICE, "%s: trying to start recovery of node "
++			mlog(0, "%s: trying to start recovery of node "
+ 			     "%u, but node %u is waiting for last recovery "
+ 			     "to complete, backoff for a bit\n", dlm->name,
+ 			     dead_node, nodenum);
+@@ -2586,7 +2579,7 @@
+ 	
+ 	spin_lock(&dlm->spinlock);
+ 	if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
+-		mlog(ML_NOTICE, "%s: node %u wants to recover node %u (%u:%u) "
++		mlog(0, "%s: node %u wants to recover node %u (%u:%u) "
+ 		     "but this node is in finalize state, waiting on finalize2\n",
+ 		     dlm->name, br->node_idx, br->dead_node,
+ 		     dlm->reco.dead_node, dlm->reco.new_master);
+@@ -2595,7 +2588,7 @@
+ 	}
+ 	spin_unlock(&dlm->spinlock);
+ 
+-	mlog(ML_NOTICE, "%s: node %u wants to recover node %u (%u:%u)\n",
++	mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
+ 	     dlm->name, br->node_idx, br->dead_node,
+ 	     dlm->reco.dead_node, dlm->reco.new_master);
+ 
+@@ -2615,7 +2608,7 @@
+ 		}
+ 	}
+ 	if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
+-		mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
++		mlog(0, "%s: dead_node previously set to %u, "
+ 		     "node %u changing it to %u\n", dlm->name, 
+ 		     dlm->reco.dead_node, br->node_idx, br->dead_node);
+ 	}
+@@ -2640,7 +2633,7 @@
+ 
+ 	dlm_kick_recovery_thread(dlm);
+ 	
+-	mlog(ML_NOTICE, "%s: recovery started by node %u, for %u (%u:%u)\n",
++	mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n",
+ 	     dlm->name, br->node_idx, br->dead_node,
+ 	     dlm->reco.dead_node, dlm->reco.new_master);
+ 
+@@ -2658,7 +2651,7 @@
+ 	int status;
+ 	int stage = 1;
+ 
+-	mlog(ML_NOTICE, "finishing recovery for node %s:%u, "
++	mlog(0, "finishing recovery for node %s:%u, "
+ 	     "stage %d\n", dlm->name, dlm->reco.dead_node, stage);
+ 
+ 	spin_lock(&dlm->spinlock);
+@@ -2685,7 +2678,7 @@
+ 				/* this has no effect on this recovery 
+ 				 * session, so set the status to zero to 
+ 				 * finish out the last recovery */
+-				mlog(ML_ERROR, "node %u went down after this "
++				mlog(0, "node %u went down after this "
+ 				     "node finished recovery.\n", nodenum);
+ 				ret = 0;
+ 				continue;
+@@ -2716,7 +2709,7 @@
+ 	if (fr->flags & DLM_FINALIZE_STAGE2)
+ 		stage = 2;
+ 
+-	mlog(ML_NOTICE, "%s: node %u finalizing recovery stage%d of "
++	mlog(0, "%s: node %u finalizing recovery stage%d of "
+ 	    "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
+ 	    fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);
+ 
+@@ -2767,7 +2760,7 @@
+ 			BUG();
+ 	}
+ 
+-	mlog(ML_NOTICE, "%s: recovery done, reco master was %u, dead now %u, master now %u\n",
++	mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n",
+ 	     dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master);
+ 
+ 	dlm_put(dlm);



