[Ocfs2-commits] mfasheh commits r2168 - in trunk/fs/ocfs2: cluster
dlm
svn-commits at oss.oracle.com
svn-commits at oss.oracle.com
Fri Apr 22 20:52:26 CDT 2005
Author: mfasheh
Signed-off-by: manish
Date: 2005-04-22 20:52:24 -0500 (Fri, 22 Apr 2005)
New Revision: 2168
Modified:
trunk/fs/ocfs2/cluster/masklog.c
trunk/fs/ocfs2/cluster/masklog.h
trunk/fs/ocfs2/dlm/Makefile
trunk/fs/ocfs2/dlm/dlmast.c
trunk/fs/ocfs2/dlm/dlmcommon.h
trunk/fs/ocfs2/dlm/dlmconvert.c
trunk/fs/ocfs2/dlm/dlmdebug.c
trunk/fs/ocfs2/dlm/dlmdomain.c
trunk/fs/ocfs2/dlm/dlmlock.c
trunk/fs/ocfs2/dlm/dlmmaster.c
trunk/fs/ocfs2/dlm/dlmrecovery.c
trunk/fs/ocfs2/dlm/dlmthread.c
trunk/fs/ocfs2/dlm/dlmunlock.c
Log:
* add some dlm mlog bits
* get rid of dlmprintk, use mlog instead
* clean up some of the dlm "wtf" and "woo" messages to be more professional
* add mlog entry / exit convenience routines
Signed-off-by: manish
Modified: trunk/fs/ocfs2/cluster/masklog.c
===================================================================
--- trunk/fs/ocfs2/cluster/masklog.c 2005-04-22 22:04:18 UTC (rev 2167)
+++ trunk/fs/ocfs2/cluster/masklog.c 2005-04-23 01:52:24 UTC (rev 2168)
@@ -184,6 +184,11 @@
set_a_string(HEARTBEAT);
set_a_string(HB_BIO);
set_a_string(DLMFS);
+ set_a_string(DLM);
+ set_a_string(DLM_DOMAIN);
+ set_a_string(DLM_THREAD);
+ set_a_string(DLM_MASTER);
+ set_a_string(DLM_RECOVERY);
set_a_string(ERROR);
set_a_string(NOTICE);
set_a_string(KTHREAD);
Modified: trunk/fs/ocfs2/cluster/masklog.h
===================================================================
--- trunk/fs/ocfs2/cluster/masklog.h 2005-04-22 22:04:18 UTC (rev 2167)
+++ trunk/fs/ocfs2/cluster/masklog.h 2005-04-23 01:52:24 UTC (rev 2168)
@@ -86,9 +86,14 @@
#define ML_MSG 0x0000000000000008ULL /* network messages */
#define ML_SOCKET 0x0000000000000010ULL /* socket lifetime */
#define ML_AIO 0x0000000000000020ULL /* aio read and write */
-#define ML_HEARTBEAT 0x0000000000000040ULL /* cluster heartbeat */
+#define ML_HEARTBEAT 0x0000000000000040ULL /* cluster heartbeat */
#define ML_HB_BIO 0x0000000000000080ULL /* heartbaet io tracing */
#define ML_DLMFS 0x0000000000000100ULL /* ocfs2_dlmfs */
+#define ML_DLM 0x0000000000000200ULL /* dlm general debugging */
+#define ML_DLM_DOMAIN 0x0000000000000400ULL /* dlm domain debugging */
+#define ML_DLM_THREAD 0x0000000000000800ULL /* dlm domain thread */
+#define ML_DLM_MASTER 0x0000000000001000ULL /* dlm master functions */
+#define ML_DLM_RECOVERY 0x0000000000002000ULL /* dlm recovery debugging */
/* bits that are infrequently given and frequently matched in the high word */
#define ML_ERROR 0x0000000100000000ULL /* sent to KERN_ERR */
#define ML_NOTICE 0x0000000200000000ULL /* setn to KERN_NOTICE */
@@ -173,6 +178,39 @@
mlog(ML_ERROR, "status = %lld\n", (long long)(st)); \
} while (0)
+#define mlog_entry(fmt, args...) do { \
+ mlog(ML_ENTRY, "ENTRY:" fmt, ##args); \
+} while (0)
+
+#define mlog_entry_void() do { \
+ mlog(ML_ENTRY, "ENTRY:\n"); \
+} while (0)
+
+#define mlog_exit(st) do { \
+ if (__builtin_types_compatible_p(typeof(st), long long)) \
+ mlog(ML_EXIT, "EXIT: %lld\n", (long long) (st)); \
+ else if (__builtin_types_compatible_p(typeof(st), unsigned long)) \
+ mlog(ML_EXIT, "EXIT: %lu\n", (unsigned long) (st)); \
+ else if (__builtin_types_compatible_p(typeof(st), long)) \
+ mlog(ML_EXIT, "EXIT: %ld\n", (long) (st)); \
+ else if (__builtin_types_compatible_p(typeof(st), unsigned int) \
+ || __builtin_types_compatible_p(typeof(st), unsigned short) \
+ || __builtin_types_compatible_p(typeof(st), unsigned char)) \
+ mlog(ML_EXIT, "EXIT: %u\n", (unsigned int) (st)); \
+ else if (__builtin_types_compatible_p(typeof(st), int) \
+ || __builtin_types_compatible_p(typeof(st), signed short) \
+ || __builtin_types_compatible_p(typeof(st), signed char)) \
+ mlog(ML_EXIT, "EXIT: %d\n", (int) (st)); \
+ else if (__builtin_types_compatible_p(typeof(st), void *)) \
+ mlog(ML_EXIT, "EXIT: %p\n", (void *) (st)); \
+ else \
+ mlog(ML_EXIT, "EXIT: %llu\n", (unsigned long long) (st)); \
+} while (0)
+
+#define mlog_exit_void() do { \
+ mlog(ML_EXIT, "EXIT\n"); \
+} while (0)
+
#include <linux/proc_fs.h>
int mlog_init_proc(struct proc_dir_entry *parent);
void mlog_remove_proc(struct proc_dir_entry *parent);
Modified: trunk/fs/ocfs2/dlm/Makefile
===================================================================
--- trunk/fs/ocfs2/dlm/Makefile 2005-04-22 22:04:18 UTC (rev 2167)
+++ trunk/fs/ocfs2/dlm/Makefile 2005-04-23 01:52:24 UTC (rev 2168)
@@ -22,10 +22,6 @@
EXTRA_CFLAGS += -DBACKING_DEV_CAPABILITIES
endif
-ifneq ($(QUIET),1)
-EXTRA_CFLAGS += -DENABLE_DLMPRINTK
-endif
-
#
# For OCFS2, SOURCES+HEADERS live even in kbuild-26 because they
# are needed for VERSION_FILES
Modified: trunk/fs/ocfs2/dlm/dlmast.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmast.c 2005-04-22 22:04:18 UTC (rev 2167)
+++ trunk/fs/ocfs2/dlm/dlmast.c 2005-04-23 01:52:24 UTC (rev 2168)
@@ -47,13 +47,15 @@
#include "dlmapi.h"
#include "dlmcommon.h"
+#define MLOG_MASK_PREFIX ML_DLM
+#include "cluster/masklog.h"
static void dlm_update_lvb(dlm_ctxt *dlm, dlm_lock_resource *res,
dlm_lock *lock);
void __dlm_queue_ast(dlm_ctxt *dlm, dlm_lock *lock)
{
- dlmprintk0("\n");
+ mlog_entry_void();
DLM_ASSERT(dlm);
DLM_ASSERT(lock);
@@ -61,7 +63,7 @@
assert_spin_locked(&dlm->ast_lock);
DLM_ASSERT(list_empty(&lock->ast_list));
if (lock->ast_pending)
- dlmprintk0("lock has an ast getting flushed right now\n");
+ mlog(0, "lock has an ast getting flushed right now\n");
/* putting lock on list, add a ref */
dlm_lock_get(lock);
@@ -73,7 +75,7 @@
void dlm_queue_ast(dlm_ctxt *dlm, dlm_lock *lock)
{
- dlmprintk0("\n");
+ mlog_entry_void();
DLM_ASSERT(dlm);
DLM_ASSERT(lock);
@@ -86,7 +88,7 @@
void __dlm_queue_bast(dlm_ctxt *dlm, dlm_lock *lock)
{
- dlmprintk0("\n");
+ mlog_entry_void();
DLM_ASSERT(dlm);
DLM_ASSERT(lock);
@@ -94,7 +96,7 @@
DLM_ASSERT(list_empty(&lock->bast_list));
if (lock->bast_pending)
- dlmprintk0("lock has a bast getting flushed right now\n");
+ mlog(0, "lock has a bast getting flushed right now\n");
/* putting lock on list, add a ref */
dlm_lock_get(lock);
@@ -106,7 +108,7 @@
void dlm_queue_bast(dlm_ctxt *dlm, dlm_lock *lock)
{
- dlmprintk0("\n");
+ mlog_entry_void();
DLM_ASSERT(dlm);
DLM_ASSERT(lock);
@@ -128,12 +130,12 @@
spin_lock(&res->spinlock);
/* check the lksb flags for the direction */
if (lksb->flags & DLM_LKSB_GET_LVB) {
- dlmprintk("getting lvb from lockres for %s node\n",
+ mlog(0, "getting lvb from lockres for %s node\n",
lock->ml.node == dlm->node_num ? "master" :
"remote");
memcpy(lksb->lvb, res->lvb, DLM_LVB_LEN);
} else if (lksb->flags & DLM_LKSB_PUT_LVB) {
- dlmprintk("setting lvb from lockres for %s node\n",
+ mlog(0, "setting lvb from lockres for %s node\n",
lock->ml.node == dlm->node_num ? "master" :
"remote");
memcpy(res->lvb, lksb->lvb, DLM_LVB_LEN);
@@ -150,7 +152,7 @@
dlm_astlockfunc_t *fn;
dlm_lockstatus *lksb;
- dlmprintk0("\n");
+ mlog_entry_void();
DLM_ASSERT(lock);
DLM_ASSERT(res);
@@ -171,7 +173,7 @@
dlm_lockstatus *lksb;
int lksbflags = 0;
- dlmprintk0("\n");
+ mlog_entry_void();
DLM_ASSERT(lock);
DLM_ASSERT(res);
@@ -192,8 +194,9 @@
dlm_lock *lock, int blocked_type)
{
dlm_bastlockfunc_t *fn = lock->bast;
- dlmprintk0("\n");
+ mlog_entry_void();
+
DLM_ASSERT(lock->ml.node == dlm->node_num);
DLM_ASSERT(fn);
@@ -234,31 +237,30 @@
if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) ==
(LKM_PUT_LVB|LKM_GET_LVB)) {
- dlmprintk("both PUT and GET lvb specified\n");
+ mlog(ML_ERROR, "both PUT and GET lvb specified\n");
ret = DLM_BADARGS;
goto leave;
}
- dlmprintk("lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" :
+ mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" :
(flags & LKM_GET_LVB ? "get lvb" : "none"));
- dlmprintk("type=%d, blocked_type=%d\n", past->type, past->blocked_type);
+ mlog(0, "type=%d, blocked_type=%d\n", past->type, past->blocked_type);
if (past->type != DLM_AST &&
past->type != DLM_BAST) {
- dlmprintk("Eeeek unknown ast type! %d, cookie=%llu, "
- "name=%.*s\n",
- past->type, cookie, locklen, name);
+ mlog(ML_ERROR, "Unknown ast type! %d, cookie=%llu, "
+ "name=%.*s\n", past->type, cookie, locklen, name);
ret = DLM_IVLOCKID;
goto leave;
}
res = dlm_lookup_lockres(dlm, name, locklen);
if (!res) {
- dlmprintk("eek! got %sast for unknown lockres! cookie=%llu, "
- "name=%.*s, namelen=%u\n",
- past->type == DLM_AST ? "" : "b",
- cookie, locklen, name, locklen);
+ mlog(ML_ERROR, "got %sast for unknown lockres! cookie=%llu, "
+ "name=%.*s, namelen=%u\n",
+ past->type == DLM_AST ? "" : "b",
+ cookie, locklen, name, locklen);
ret = DLM_IVLOCKID;
goto leave;
}
@@ -266,18 +268,18 @@
/* cannot get a proxy ast message if this node owns it */
DLM_ASSERT(res->owner != dlm->node_num);
- dlmprintk("lockres %.*s\n", res->lockname.len, res->lockname.name);
+ mlog(0, "lockres %.*s\n", res->lockname.len, res->lockname.name);
if (!dlm_is_recovery_lock(past->name, past->namelen))
down_read(&dlm->recovery_sem);
spin_lock(&res->spinlock);
if (res->state & DLM_LOCK_RES_RECOVERING) {
- dlmprintk0("responding with DLM_RECOVERING!\n");
+ mlog(0, "responding with DLM_RECOVERING!\n");
ret = DLM_RECOVERING;
goto unlock_out;
}
if (res->state & DLM_LOCK_RES_MIGRATING) {
- dlmprintk0("responding with DLM_MIGRATING!\n");
+ mlog(0, "responding with DLM_MIGRATING!\n");
ret = DLM_MIGRATING;
goto unlock_out;
}
@@ -302,10 +304,9 @@
goto do_ast;
}
- dlmprintk("eek! got %sast for unknown lock! cookie=%llu, "
- "name=%.*s, namelen=%u\n",
- past->type == DLM_AST ? "" : "b",
- cookie, locklen, name, locklen);
+ mlog(ML_ERROR, "got %sast for unknown lock! cookie=%llu, name=%.*s, "
+ "namelen=%u\n", past->type == DLM_AST ? "" : "b", cookie, locklen,
+ name, locklen);
ret = DLM_NORMAL;
unlock_out:
@@ -320,7 +321,7 @@
/* do not alter lock refcount. switching lists. */
list_del_init(&lock->list);
list_add_tail(&lock->list, &res->granted);
- dlmprintk("ast: adding to granted list... type=%d, "
+ mlog(0, "ast: adding to granted list... type=%d, "
"convert_type=%d\n", lock->ml.type, lock->ml.convert_type);
if (lock->ml.convert_type != LKM_IVMODE) {
lock->ml.type = lock->ml.convert_type;
@@ -368,9 +369,9 @@
size_t iovlen = 1;
int status;
- dlmprintk("res %.*s, to=%u, type=%d, blocked_type=%d\n",
- res->lockname.len, res->lockname.name, lock->ml.node,
- msg_type, blocked_type);
+ mlog_entry("res %.*s, to=%u, type=%d, blocked_type=%d\n",
+ res->lockname.len, res->lockname.name, lock->ml.node,
+ msg_type, blocked_type);
memset(&past, 0, sizeof(dlm_proxy_ast));
past.node_idx = dlm->node_num;
@@ -383,7 +384,7 @@
iov[0].iov_len = sizeof(dlm_proxy_ast);
iov[0].iov_base = &past;
if (flags & DLM_LKSB_GET_LVB) {
- dlmprintk0("returning requested LVB data\n");
+ mlog(0, "returning requested LVB data\n");
past.flags |= LKM_GET_LVB;
iov[1].iov_len = DLM_LVB_LEN;
iov[1].iov_base = lock->lksb->lvb;
@@ -394,22 +395,19 @@
ret = net_send_message_iov(DLM_PROXY_AST_MSG, dlm->key, iov, iovlen,
lock->ml.node, &status);
if (ret < 0)
- dlmprintk("(%d) dlm_send_proxy_ast: returning %d\n",
- current->pid, ret);
+ mlog_errno(ret);
else {
if (status == DLM_RECOVERING) {
- dlmprintk("sent AST to node %u, it thinks this "
- "node is DEAD!\n", lock->ml.node);
- dlmprintk0("must die now. goodbye!\n");
+ mlog(ML_ERROR, "sent AST to node %u, it thinks this "
+ "node is dead!\n", lock->ml.node);
BUG();
} else if (status == DLM_MIGRATING) {
- dlmprintk("sent AST to node %u, it returned "
- "DLM_MIGRATING! evil!\n", lock->ml.node);
- dlmprintk0("must die now. goodbye!\n");
+ mlog(ML_ERROR, "sent AST to node %u, it returned "
+ "DLM_MIGRATING!\n", lock->ml.node);
BUG();
} else if (status != DLM_NORMAL) {
- dlmprintk("AST to node %u returned %d!\n",
- lock->ml.node, status);
+ mlog(ML_ERROR, "AST to node %u returned %d!\n",
+ lock->ml.node, status);
/* ignore it */
}
ret = 0;
Modified: trunk/fs/ocfs2/dlm/dlmcommon.h
===================================================================
--- trunk/fs/ocfs2/dlm/dlmcommon.h 2005-04-22 22:04:18 UTC (rev 2167)
+++ trunk/fs/ocfs2/dlm/dlmcommon.h 2005-04-23 01:52:24 UTC (rev 2168)
@@ -27,17 +27,6 @@
#include <linux/kref.h>
-#ifndef ENABLE_DLMPRINTK
-#define dlmprintk(x, arg...)
-#define dlmprintk0(x)
-#else
-#define dlmprintk(x, arg...) printk("(dlm:%d)(%s:%d) " x, current->pid, __FUNCTION__, __LINE__, ##arg)
-#define dlmprintk0(x) printk("(dlm:%d)(%s:%d) " x, current->pid, __FUNCTION__, __LINE__)
-#endif
-
-#define dlmerror(x, arg...) printk(KERN_ERR "(dlm:%d)(%s:%d) " x, current->pid, __FUNCTION__, __LINE__, ##arg)
-#define dlmerror0(x) printk(KERN_ERR "(dlm:%d)(%s:%d) " x, current->pid, __FUNCTION__, __LINE__)
-
#define DLM_ASSERT(x) ({ if (!(x)) { printk("assert failed! %s:%d\n", __FILE__, __LINE__); BUG(); } })
@@ -782,16 +771,13 @@
assert_spin_locked(&res->spinlock);
- if (res->state & DLM_LOCK_RES_RECOVERING) {
- dlmprintk0("returning DLM_RECOVERING\n");
+ if (res->state & DLM_LOCK_RES_RECOVERING)
status = DLM_RECOVERING;
- } else if (res->state & DLM_LOCK_RES_MIGRATING) {
- dlmprintk0("returning DLM_MIGRATING\n");
+ else if (res->state & DLM_LOCK_RES_MIGRATING)
status = DLM_MIGRATING;
- } else if (res->state & DLM_LOCK_RES_IN_PROGRESS) {
- dlmprintk0("returning DLM_FORWARD\n");
+ else if (res->state & DLM_LOCK_RES_IN_PROGRESS)
status = DLM_FORWARD;
- }
+
return status;
}
Modified: trunk/fs/ocfs2/dlm/dlmconvert.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmconvert.c 2005-04-22 22:04:18 UTC (rev 2167)
+++ trunk/fs/ocfs2/dlm/dlmconvert.c 2005-04-23 01:52:24 UTC (rev 2168)
@@ -49,6 +49,9 @@
#include "dlmconvert.h"
+#define MLOG_MASK_PREFIX ML_DLM
+#include "cluster/masklog.h"
+
/* NOTE: __dlmconvert_master is the only function in here that
* needs a spinlock held on entry (res->spinlock) and it is the
* only one that holds a lock on exit (res->spinlock).
@@ -120,22 +123,23 @@
assert_spin_locked(&res->spinlock);
- dlmprintk("type=%d, convert_type=%d, new convert_type=%d\n", lock->ml.type,
- lock->ml.convert_type, type);
+ mlog_entry("type=%d, convert_type=%d, new convert_type=%d\n",
+ lock->ml.type, lock->ml.convert_type, type);
spin_lock(&lock->spinlock);
/* already converting? */
if (lock->ml.convert_type != LKM_IVMODE) {
- dlmprintk0("attempted to convert a lock with a lock conversion "
- "pending\n");
+ mlog(ML_ERROR, "attempted to convert a lock with a lock "
+ "conversion pending\n");
status = DLM_DENIED;
goto unlock_exit;
}
/* must be on grant queue to convert */
if (!dlm_lock_on_list(&res->granted, lock)) {
- dlmprintk0("attempted to convert a lock not on grant queue\n");
+ mlog(ML_ERROR, "attempted to convert a lock not on grant "
+ "queue\n");
status = DLM_DENIED;
goto unlock_exit;
}
@@ -144,26 +148,25 @@
switch (lock->ml.type) {
case LKM_EXMODE:
/* EX + LKM_VALBLK + convert == set lvb */
- dlmprintk("will set lvb: converting %s->%s\n",
- dlm_lock_mode_name(lock->ml.type),
- dlm_lock_mode_name(type));
+ mlog(0, "will set lvb: converting %s->%s\n",
+ dlm_lock_mode_name(lock->ml.type),
+ dlm_lock_mode_name(type));
lock->lksb->flags |= DLM_LKSB_PUT_LVB;
break;
case LKM_PRMODE:
case LKM_NLMODE:
/* refetch if new level is not NL */
if (type > LKM_NLMODE) {
- dlmprintk("will fetch new value into "
- "lvb: converting %s->%s\n",
- dlm_lock_mode_name(lock->ml.type),
- dlm_lock_mode_name(type));
+ mlog(0, "will fetch new value into "
+ "lvb: converting %s->%s\n",
+ dlm_lock_mode_name(lock->ml.type),
+ dlm_lock_mode_name(type));
lock->lksb->flags |= DLM_LKSB_GET_LVB;
} else {
- dlmprintk("will NOT fetch new value "
- "into lvb: converting "
- "%s->%s\n",
- dlm_lock_mode_name(lock->ml.type),
- dlm_lock_mode_name(type));
+ mlog(0, "will NOT fetch new value "
+ "into lvb: converting %s->%s\n",
+ dlm_lock_mode_name(lock->ml.type),
+ dlm_lock_mode_name(type));
flags &= ~(LKM_VALBLK);
}
break;
@@ -197,12 +200,12 @@
/* fall thru to grant */
grant:
- dlmprintk("res %.*s, granting %s lock\n", res->lockname.len,
- res->lockname.name, dlm_lock_mode_name(type));
+ mlog(0, "res %.*s, granting %s lock\n", res->lockname.len,
+ res->lockname.name, dlm_lock_mode_name(type));
/* immediately grant the new lock type */
lock->lksb->status = DLM_NORMAL;
if (lock->ml.node == dlm->node_num)
- dlmprintk0("doing in-place convert for nonlocal lock\n");
+ mlog(0, "doing in-place convert for nonlocal lock\n");
lock->ml.type = type;
status = DLM_NORMAL;
*call_ast = 1;
@@ -210,14 +213,14 @@
switch_queues:
if (flags & LKM_NOQUEUE) {
- dlmprintk("failed to convert NOQUEUE lock %.*s from "
- "%d to %d...\n", res->lockname.len,
- res->lockname.name, lock->ml.type, type);
+ mlog(0, "failed to convert NOQUEUE lock %.*s from "
+ "%d to %d...\n", res->lockname.len, res->lockname.name,
+ lock->ml.type, type);
status = DLM_NOTQUEUED;
goto unlock_exit;
}
- dlmprintk("res %.*s, queueing...\n", res->lockname.len,
- res->lockname.name);
+ mlog(0, "res %.*s, queueing...\n", res->lockname.len,
+ res->lockname.name);
lock->ml.convert_type = type;
/* do not alter lock refcount. switching lists. */
@@ -243,8 +246,8 @@
{
dlm_status status;
- dlmprintk("type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
- lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
+ mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
+ lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
spin_lock(&res->spinlock);
if (res->state & DLM_LOCK_RES_RECOVERING) {
@@ -261,8 +264,8 @@
list_del_init(&lock->list);
list_add_tail(&lock->list, &res->converting);
if (lock->ml.convert_type != LKM_IVMODE) {
- dlmprintk0("error! converting a remote lock that is already "
- "converting!\n");
+ mlog(ML_ERROR, "converting a remote lock that is already "
+ "converting!\n");
/* TODO: return correct error */
BUG();
}
@@ -273,11 +276,9 @@
flags |= LKM_PUT_LVB;
lock->lksb->flags |= DLM_LKSB_PUT_LVB;
} else {
- if (lock->ml.convert_type == LKM_NLMODE) {
- dlmprintk0("erm, no point in specifying "
- "LKM_VALBLK if converting to NL\n");
+ if (lock->ml.convert_type == LKM_NLMODE)
flags &= ~LKM_VALBLK;
- } else {
+ else {
flags |= LKM_GET_LVB;
lock->lksb->flags |= DLM_LKSB_GET_LVB;
}
@@ -328,7 +329,7 @@
struct iovec iov[2];
size_t iovlen = 1;
- dlmprintk0("\n");
+ mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
memset(&convert, 0, sizeof(dlm_convert_lock));
convert.node_idx = dlm->node_num;
@@ -355,21 +356,17 @@
// successfully sent and received
ret = status; // this is already a dlm_status
if (ret == DLM_RECOVERING) {
- dlmprintk("node %u returned DLM_RECOVERING "
- "from convert message!\n",
- res->owner);
+ mlog(0, "node %u returned DLM_RECOVERING from convert "
+ "message!\n", res->owner);
} else if (ret == DLM_MIGRATING) {
- dlmprintk("node %u returned DLM_MIGRATING "
- "from convert message!\n",
- res->owner);
+ mlog(0, "node %u returned DLM_MIGRATING from convert "
+ "message!\n", res->owner);
} else if (ret == DLM_FORWARD) {
- dlmprintk("node %u returned DLM_FORWARD "
- "from convert message!\n",
- res->owner);
+ mlog(0, "node %u returned DLM_FORWARD from convert "
+ "message!\n", res->owner);
}
} else {
- dlmprintk("error occurred in net_send_message: %d\n",
- tmpret);
+ mlog_errno(tmpret);
ret = dlm_err_to_dlm_status(tmpret);
}
@@ -413,13 +410,13 @@
if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) ==
(LKM_PUT_LVB|LKM_GET_LVB)) {
- dlmprintk("both PUT and GET lvb specified\n");
+ mlog(ML_ERROR, "both PUT and GET lvb specified\n");
status = DLM_BADARGS;
goto leave;
}
- dlmprintk("lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" :
- (flags & LKM_GET_LVB ? "get lvb" : "none"));
+ mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" :
+ (flags & LKM_GET_LVB ? "get lvb" : "none"));
status = DLM_IVLOCKID;
res = dlm_lookup_lockres(dlm, cnv->name, cnv->namelen);
@@ -473,8 +470,8 @@
leave:
if (!lock)
- dlmprintk("did not find lock to convert on "
- "grant queue! cookie=%llu\n", cnv->cookie);
+ mlog(ML_ERROR, "did not find lock to convert on grant queue! "
+ "cookie=%llu\n", cnv->cookie);
else
dlm_lock_put(lock);
Modified: trunk/fs/ocfs2/dlm/dlmdebug.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmdebug.c 2005-04-22 22:04:18 UTC (rev 2167)
+++ trunk/fs/ocfs2/dlm/dlmdebug.c 2005-04-23 01:52:24 UTC (rev 2168)
@@ -41,6 +41,9 @@
#include "dlmdomain.h"
+#define MLOG_MASK_PREFIX ML_DLM
+#include "cluster/masklog.h"
+
static void dlm_dump_all_lock_resources(char *data, int len);
static void dlm_dump_lock_resources(dlm_ctxt *dlm);
static void dlm_dump_purge_list(dlm_ctxt *dlm);
@@ -132,7 +135,7 @@
dlm->name, dlm->node_num, dlm->key);
printk("some bug here... should not have to check for this...\n");
if (!dlm || !dlm->name) {
- printk("wtf... dlm=%p\n", dlm);
+ mlog(ML_ERROR, "dlm=%p\n", dlm);
return;
}
@@ -223,33 +226,33 @@
char *tmp, *buf = NULL;
if (len >= PAGE_SIZE) {
- printk("user passed too much data: %d bytes\n", len);
+ mlog(0, "user passed too much data: %d bytes\n", len);
return;
}
if (len < 5) {
- printk("user passed too little data: %d bytes\n", len);
+ mlog(0, "user passed too little data: %d bytes\n", len);
return;
}
buf = kmalloc(len+1, GFP_KERNEL);
if (!buf) {
- printk("could not alloc %d bytes\n", len);
+ mlog(ML_ERROR, "could not alloc %d bytes\n", len);
return;
}
if (strncpy_from_user(buf, data, len) < len) {
- printk("failed to get all user data. done.\n");
+ mlog(ML_ERROR, "failed to get all user data. done.\n");
goto leave;
}
buf[len]='\0';
- dlmprintk("got this data from user: %s\n", buf);
+ mlog(0, "got this data from user: %s\n", buf);
tmp = buf;
if (*tmp != 'M') {
- printk("bad data\n");
+ mlog(0, "bad data\n");
goto leave;
}
tmp++;
if (*tmp != ' ') {
- printk("bad data\n");
+ mlog(0, "bad data\n");
goto leave;
}
tmp++;
@@ -261,7 +264,7 @@
tmp++;
}
if (!*tmp || !*(tmp+1)) {
- printk("bad data\n");
+ mlog(0, "bad data\n");
goto leave;
}
@@ -285,18 +288,18 @@
spin_unlock(&dlm_domain_lock);
if (!dlm_grab(dlm)) {
- printk("bad dlm!\n");
+ mlog(ML_ERROR, "bad dlm!\n");
goto leave;
}
res = dlm_lookup_lockres(dlm, resname, strlen(resname));
if (!res) {
- printk("bad lockres!\n");
+ mlog(ML_ERROR, "bad lockres!\n");
dlm_put(dlm);
goto leave;
}
- printk("woo! found dlm=%p, lockres=%p\n", dlm, res);
+ mlog(0, "found dlm=%p, lockres=%p\n", dlm, res);
{
int ret;
ret = dlm_migrate_lockres(dlm, res, NM_MAX_NODES);
Modified: trunk/fs/ocfs2/dlm/dlmdomain.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmdomain.c 2005-04-22 22:04:18 UTC (rev 2167)
+++ trunk/fs/ocfs2/dlm/dlmdomain.c 2005-04-23 01:52:24 UTC (rev 2168)
@@ -44,6 +44,9 @@
#include "dlmver.h"
+#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_DOMAIN)
+#include "cluster/masklog.h"
+
/*
*
* spinlock lock ordering: if multiple locks are needed, obey this ordering:
@@ -93,8 +96,8 @@
}
dlm_lock_resource * __dlm_lookup_lockres(dlm_ctxt *dlm,
- const char *name,
- unsigned int len)
+ const char *name,
+ unsigned int len)
{
unsigned int hash;
struct list_head *iter;
@@ -103,7 +106,7 @@
BUG_ON(!name);
- dlmprintk0("\n");
+ mlog_entry("%.*s\n", len, name);
assert_spin_locked(&dlm->spinlock);
@@ -217,7 +220,7 @@
spin_unlock(&dlm_domain_lock);
- dlmprintk("freeing memory from domain %s\n", dlm->name);
+ mlog(0, "freeing memory from domain %s\n", dlm->name);
wake_up(&dlm_domain_events);
@@ -313,7 +316,7 @@
int i;
dlm_lock_resource *res;
- dlmprintk("Migrating locks from domain %s\n", dlm->name);
+ mlog(0, "Migrating locks from domain %s\n", dlm->name);
spin_lock(&dlm->spinlock);
for (i=0; i<DLM_HASH_SIZE; i++) {
while (!list_empty(&dlm->resources[i])) {
@@ -321,13 +324,14 @@
dlm_lock_resource, list);
/* this should unhash the lockres
* and exit with dlm->spinlock */
- dlmprintk("purging res=%p\n", res);
+ mlog(0, "purging res=%p\n", res);
if (res->state & DLM_LOCK_RES_DIRTY ||
!list_empty(&res->dirty)) {
- dlmprintk0("this is probably a bug, dirty\n");
/* HACK! this should absolutely go.
* need to figure out why some empty
* lockreses are still marked dirty */
+ mlog(ML_ERROR, "lockres %.*s dirty!\n",
+ res->lockname.len, res->lockname.name);
spin_lock(&res->spinlock);
dlm_shuffle_lists(dlm, res);
list_del_init(&res->dirty);
@@ -339,7 +343,7 @@
}
spin_unlock(&dlm->spinlock);
- dlmprintk("DONE Migrating locks from domain %s\n", dlm->name);
+ mlog(0, "DONE Migrating locks from domain %s\n", dlm->name);
}
static int dlm_no_joining_node(dlm_ctxt *dlm)
@@ -362,7 +366,7 @@
spin_lock(&dlm->spinlock);
if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) {
- dlmprintk("Node %d is joining, we wait on it.\n",
+ mlog(0, "Node %d is joining, we wait on it.\n",
dlm->joining_node);
spin_unlock(&dlm->spinlock);
spin_unlock(&dlm_domain_lock);
@@ -382,13 +386,13 @@
assert_spin_locked(&dlm->spinlock);
- printk("ocfs2_dlm: Nodes in my domain (\"%s\"):\n", dlm->name);
+ mlog(ML_NOTICE, "Nodes in my domain (\"%s\"):\n", dlm->name);
while ((node = find_next_bit(dlm->domain_map, NM_MAX_NODES, node + 1))
!= -1) {
if (node >= NM_MAX_NODES)
break;
- printk(" node %d\n", node);
+ mlog(ML_NOTICE, " node %d\n", node);
}
}
@@ -398,7 +402,7 @@
unsigned int node;
dlm_exit_domain *exit_msg = (dlm_exit_domain *) msg->buf;
- dlmprintk0("\n");
+ mlog_entry("%p %u %p\n", msg, len, data);
if (!dlm_grab(dlm))
return 0;
@@ -407,7 +411,7 @@
node = exit_msg->node_idx;
- dlmprintk("Node %u leaves domain %s\n", node, dlm->name);
+ mlog(0, "Node %u leaves domain %s\n", node, dlm->name);
spin_lock(&dlm->spinlock);
clear_bit(node, dlm->domain_map);
@@ -425,7 +429,7 @@
int status;
dlm_exit_domain leave_msg;
- dlmprintk("Asking node %u if we can leave the domain %s me = %u\n",
+ mlog(0, "Asking node %u if we can leave the domain %s me = %u\n",
node, dlm->name, dlm->node_num);
memset(&leave_msg, 0, sizeof(leave_msg));
@@ -437,7 +441,7 @@
&leave_msg, sizeof(leave_msg), node,
NULL);
- dlmprintk("status return %d from net_send_message\n", status);
+ mlog(0, "status return %d from net_send_message\n", status);
return status;
}
@@ -474,9 +478,8 @@
if (status < 0 &&
status != -ENOPROTOOPT &&
status != -ENOTCONN) {
- printk("dlm_leave_domain: Error %d sending "
- "domain exit message to node %d\n", status,
- node);
+ mlog(ML_NOTICE, "Error %d sending domain exit message "
+ "to node %d\n", status, node);
/* Not sure what to do here but lets sleep for
* a bit in case this was a transient
@@ -518,7 +521,7 @@
spin_unlock(&dlm_domain_lock);
if (leave) {
- dlmprintk("shutting down domain %s\n", dlm->name);
+ mlog(0, "shutting down domain %s\n", dlm->name);
dlm_migrate_all_locks(dlm);
dlm_mark_domain_leaving(dlm);
dlm_leave_domain(dlm);
@@ -537,7 +540,7 @@
query = (dlm_query_join_request *) msg->buf;
dlm_query_join_request_to_host(query);
- dlmprintk("node %u wants to join domain %s\n", query->node_idx,
+ mlog(0, "node %u wants to join domain %s\n", query->node_idx,
query->domain);
response = JOIN_OK_NO_MAP;
@@ -571,7 +574,7 @@
}
spin_unlock(&dlm_domain_lock);
- dlmprintk("We respond with %u\n", response);
+ mlog(0, "We respond with %u\n", response);
return response;
}
@@ -584,7 +587,7 @@
assert = (dlm_assert_joined *) msg->buf;
dlm_assert_joined_to_host(assert);
- dlmprintk("node %u asserts join on domain %s\n", assert->node_idx,
+ mlog(0, "node %u asserts join on domain %s\n", assert->node_idx,
assert->domain);
spin_lock(&dlm_domain_lock);
@@ -617,7 +620,7 @@
cancel = (dlm_cancel_join *) msg->buf;
dlm_cancel_join_to_host(cancel);
- dlmprintk("node %u cancels join on domain %s\n", cancel->node_idx,
+ mlog(0, "node %u cancels join on domain %s\n", cancel->node_idx,
cancel->domain);
spin_lock(&dlm_domain_lock);
@@ -655,7 +658,7 @@
&cancel_msg, sizeof(cancel_msg), node,
NULL);
if (status < 0) {
- dlmprintk("net_send_message returned %d!\n", status);
+ mlog_errno(status);
goto bail;
}
@@ -686,8 +689,8 @@
tmpstat = dlm_send_one_join_cancel(dlm, node);
if (tmpstat) {
- dlmprintk("Error return %d cancelling join on node "
- "%d\n", tmpstat, node);
+ mlog(ML_ERROR, "Error return %d cancelling join on "
+ "node %d\n", tmpstat, node);
if (!status)
status = tmpstat;
}
@@ -703,7 +706,7 @@
int status, retval;
dlm_query_join_request join_msg;
- dlmprintk("querying node %d\n", node);
+ mlog(0, "querying node %d\n", node);
memset(&join_msg, 0, sizeof(join_msg));
join_msg.node_idx = dlm->node_num;
@@ -715,7 +718,7 @@
status = net_send_message(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, &join_msg,
sizeof(join_msg), node, &retval);
if (status < 0 && status != -ENOPROTOOPT && status != -ENOTCONN) {
- dlmprintk("net_send_message returned %d!\n", status);
+ mlog_errno(status);
goto bail;
}
@@ -736,10 +739,11 @@
*response = retval;
} else {
status = -EINVAL;
- dlmprintk("invalid response %d from node %u\n", retval, node);
+ mlog(ML_ERROR, "invalid response %d from node %u\n", retval,
+ node);
}
- dlmprintk("status %d, node %d response is %d\n", status, node,
+ mlog(0, "status %d, node %d response is %d\n", status, node,
*response);
bail:
@@ -752,7 +756,7 @@
int status;
dlm_assert_joined assert_msg;
- dlmprintk("Sending join assert to node %u\n", node);
+ mlog(0, "Sending join assert to node %u\n", node);
memset(&assert_msg, 0, sizeof(assert_msg));
assert_msg.node_idx = dlm->node_num;
@@ -764,7 +768,7 @@
status = net_send_message(DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY,
&assert_msg, sizeof(assert_msg), node, NULL);
if (status < 0)
- dlmprintk("net_send_message returned %d!\n", status);
+ mlog_errno(status);
return status;
}
@@ -795,10 +799,10 @@
spin_unlock(&dlm->spinlock);
if (status) {
- dlmprintk("Error return %d asserting join on "
- "node %d\n", status, node);
+ mlog(ML_ERROR, "Error return %d asserting "
+ "join on node %d\n", status, node);
- /* give us some time betweek errors... */
+ /* give us some time between errors... */
if (live)
schedule();
}
@@ -818,7 +822,7 @@
int ret;
if (response == JOIN_DISALLOW) {
- dlmprintk("Latest response of disallow -- should restart\n");
+ mlog(0, "Latest response of disallow -- should restart\n");
return 1;
}
@@ -830,7 +834,7 @@
spin_unlock(&dlm->spinlock);
if (ret)
- dlmprintk("Node maps changed -- should restart\n");
+ mlog(0, "Node maps changed -- should restart\n");
return ret;
}
@@ -841,12 +845,12 @@
struct domain_join_ctxt *ctxt;
enum dlm_query_join_response response;
- dlmprintk0("\n");
+ mlog_entry("%p\n", dlm);
ctxt = kmalloc(sizeof(struct domain_join_ctxt), GFP_KERNEL);
if (!ctxt) {
- dlmprintk("No memory for domain_join_ctxt\n");
status = -ENOMEM;
+ mlog_errno(status);
goto bail;
}
memset(ctxt, 0, sizeof(*ctxt));
@@ -874,7 +878,7 @@
status = dlm_request_join(dlm, node, &response);
if (status < 0) {
- dlmprintk("%d return from request_join!\n", status);
+ mlog_errno(status);
goto bail;
}
@@ -889,7 +893,7 @@
}
}
- dlmprintk("Yay, done querying nodes!\n");
+ mlog(0, "Yay, done querying nodes!\n");
/* Yay, everyone agree's we can join the domain. My domain is
* comprised of all nodes who were put in the
@@ -916,13 +920,12 @@
ctxt->yes_resp_map,
sizeof(ctxt->yes_resp_map));
if (tmpstat < 0)
- dlmprintk("%d return cancelling join!\n",
- tmpstat);
+ mlog_errno(tmpstat);
}
kfree(ctxt);
}
- dlmprintk("returning %d\n", status);
+ mlog(0, "returning %d\n", status);
return status;
}
@@ -935,7 +938,7 @@
{
int status;
- dlmprintk("registering handlers.\n");
+ mlog(0, "registering handlers.\n");
hb_setup_callback(&dlm->dlm_hb_down, HB_NODE_DOWN_CB,
dlm_hb_node_down_cb, dlm, DLM_HB_NODE_DOWN_PRI);
@@ -1061,11 +1064,11 @@
BUG_ON(!dlm);
- dlmprintk("Join domain %s\n", dlm->name);
+ mlog(0, "Join domain %s\n", dlm->name);
status = dlm_register_domain_handlers(dlm);
if (status) {
- dlmprintk("Error %d registering handlers!\n", status);
+ mlog_errno(status);
goto bail;
}
@@ -1073,7 +1076,7 @@
status = dlm_launch_thread(dlm);
if (status < 0) {
- dlmprintk("could not launch dlm thread!\n");
+ mlog_errno(status);
goto bail;
}
@@ -1096,13 +1099,13 @@
} while (status == -EAGAIN);
if (status < 0) {
- dlmprintk("Joining broke! %d\n", status);
+ mlog_errno(status);
goto bail;
}
status = dlm_launch_recovery_thread(dlm);
if (status < 0) {
- dlmprintk("could not launch dlm recovery thread!\n");
+ mlog_errno(status);
goto bail;
}
@@ -1127,14 +1130,14 @@
dlm = kmalloc(sizeof(dlm_ctxt), GFP_KERNEL);
if (!dlm) {
- dlmprintk0("could not allocate dlm_ctxt\n");
+ mlog_errno(-ENOMEM);
goto leave;
}
memset(dlm, 0, sizeof(dlm_ctxt));
dlm->name = kmalloc(strlen(domain) + 1, GFP_KERNEL);
if (dlm->name == NULL) {
- dlmprintk0("could not allocate dlm domain name\n");
+ mlog_errno(-ENOMEM);
kfree(dlm);
dlm = NULL;
goto leave;
@@ -1142,7 +1145,7 @@
dlm->resources = (struct list_head *) __get_free_page(GFP_KERNEL);
if (!dlm->resources) {
- dlmprintk0("could not allocate dlm hash\n");
+ mlog_errno(-ENOMEM);
kfree(dlm->name);
kfree(dlm);
dlm = NULL;
@@ -1171,7 +1174,7 @@
INIT_LIST_HEAD(&dlm->pending_asts);
INIT_LIST_HEAD(&dlm->pending_basts);
- dlmprintk("dlm->recovery_map=%p, &(dlm->recovery_map[0])=%p\n",
+ mlog(0, "dlm->recovery_map=%p, &(dlm->recovery_map[0])=%p\n",
dlm->recovery_map, &(dlm->recovery_map[0]));
memset(dlm->recovery_map, 0, sizeof(dlm->recovery_map));
@@ -1202,7 +1205,7 @@
kref_init(&dlm->dlm_refs, dlm_ctxt_release);
dlm->dlm_state = DLM_CTXT_NEW;
- dlmprintk("context init: refcount %u\n",
+ mlog(0, "context init: refcount %u\n",
atomic_read(&dlm->dlm_refs.refcount));
leave:
@@ -1220,17 +1223,17 @@
dlm_ctxt *new_ctxt = NULL;
if (strlen(domain) > NM_MAX_NAME_LEN) {
- dlmprintk0("domain name length too long\n");
+ mlog(ML_ERROR, "domain name length too long\n");
goto leave;
}
if (!hb_check_local_node_heartbeating()) {
- dlmprintk0("the local node has not ben configured, or is not "
- "heartbeating\n");
+ mlog(ML_ERROR, "the local node has not ben configured, or is "
+ "not heartbeating\n");
goto leave;
}
- dlmprintk("register called for domain \"%s\"\n", domain);
+ mlog(0, "register called for domain \"%s\"\n", domain);
retry:
dlm = NULL;
@@ -1244,7 +1247,7 @@
if (dlm->dlm_state != DLM_CTXT_JOINED) {
spin_unlock(&dlm_domain_lock);
- dlmprintk("This ctxt is not joined yet!\n");
+ mlog(0, "This ctxt is not joined yet!\n");
wait_event_interruptible(dlm_domain_events,
dlm_wait_on_domain_helper(
domain));
@@ -1278,7 +1281,7 @@
ret = dlm_join_domain(dlm);
if (ret) {
- dlmprintk("return code %d from join_domain!\n", ret);
+ mlog_errno(ret);
dlm_put(dlm);
dlm = NULL;
}
@@ -1335,8 +1338,6 @@
dlm_print_version();
- dlmprintk0("Loaded dlm module\n");
-
status = dlm_register_net_handlers();
if (status)
return -1;
@@ -1349,7 +1350,6 @@
static void __exit dlm_exit (void)
{
dlm_unregister_net_handlers();
- dlmprintk0("Unloaded dlm module\n");
} /* dlm_driver_exit */
MODULE_AUTHOR("Oracle");
Modified: trunk/fs/ocfs2/dlm/dlmlock.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmlock.c 2005-04-22 22:04:18 UTC (rev 2167)
+++ trunk/fs/ocfs2/dlm/dlmlock.c 2005-04-23 01:52:24 UTC (rev 2168)
@@ -49,6 +49,9 @@
#include "dlmconvert.h"
+#define MLOG_MASK_PREFIX ML_DLM
+#include "cluster/masklog.h"
+
static spinlock_t dlm_cookie_lock = SPIN_LOCK_UNLOCKED;
static u64 dlm_next_cookie = 1;
@@ -106,7 +109,7 @@
DLM_ASSERT(dlm);
DLM_ASSERT(lock->lksb);
- dlmprintk("type=%d\n", lock->ml.type);
+ mlog_entry("type=%d\n", lock->ml.type);
spin_lock(&res->spinlock);
/* if called from dlm_create_lock_handler, need to
@@ -122,7 +125,7 @@
__dlm_lockres_reserve_ast(res);
if (dlm_can_grant_new_lock(res, lock)) {
- dlmprintk("I can grant this lock right away\n");
+ mlog(0, "I can grant this lock right away\n");
/* got it right away */
lock->lksb->status = DLM_NORMAL;
status = DLM_NORMAL;
@@ -179,9 +182,9 @@
{
dlm_status status = DLM_DENIED;
- dlmprintk("type=%d\n", lock->ml.type);
- dlmprintk("lockres %.*s, flags = 0x%x\n", res->lockname.len,
- res->lockname.name, flags);
+ mlog_entry("type=%d\n", lock->ml.type);
+ mlog(0, "lockres %.*s, flags = 0x%x\n", res->lockname.len,
+ res->lockname.name, flags);
spin_lock(&res->spinlock);
@@ -230,7 +233,7 @@
int tmpret, status = 0;
dlm_status ret;
- dlmprintk0("\n");
+ mlog_entry_void();
memset(&create, 0, sizeof(create));
create.node_idx = dlm->node_num;
@@ -247,8 +250,7 @@
// successfully sent and received
ret = status; // this is already a dlm_status
} else {
- dlmprintk("error occurred in net_send_message: %d\n",
- tmpret);
+ mlog_errno(tmpret);
ret = dlm_err_to_dlm_status(tmpret);
}
@@ -285,10 +287,10 @@
dlm_lock_detach_lockres(lock);
if (lksb->flags & DLM_LKSB_KERNEL_ALLOCATED) {
- dlmprintk0("freeing kernel-allocated lksb\n");
+ mlog(0, "freeing kernel-allocated lksb\n");
kfree(lksb);
} else {
- dlmprintk0("clearing lockid pointer on user-allocated lksb\n");
+ mlog(0, "clearing lockid pointer on user-allocated lksb\n");
lksb->lockid = NULL;
}
kfree(lock);
@@ -312,7 +314,7 @@
res = lock->lockres;
if (res) {
lock->lockres = NULL;
- dlmprintk0("removing lock's lockres reference\n");
+ mlog(0, "removing lock's lockres reference\n");
dlm_lockres_put(res);
}
}
@@ -383,7 +385,7 @@
DLM_ASSERT(dlm);
- dlmprintk0("\n");
+ mlog_entry_void();
if (!dlm_grab(dlm))
return DLM_REJECTED;
@@ -409,7 +411,7 @@
if (create->flags & LKM_GET_LVB) {
lksb->flags |= DLM_LKSB_GET_LVB;
- dlmprintk("set DLM_LKSB_GET_LVB flag\n");
+ mlog(0, "set DLM_LKSB_GET_LVB flag\n");
}
status = DLM_IVLOCKID;
@@ -422,7 +424,7 @@
spin_unlock(&res->spinlock);
if (status != DLM_NORMAL) {
- dlmprintk("lockres recovering/migrating/in-progress\n");
+ mlog(0, "lockres recovering/migrating/in-progress\n");
goto leave;
}
@@ -454,7 +456,7 @@
spin_lock(&dlm_cookie_lock);
*cookie = (dlm_next_cookie | tmpnode);
if (++dlm_next_cookie & 0xff00000000000000ull) {
- dlmprintk0("eek! this node's cookie will now wrap!\n");
+ mlog(0, "This node's cookie will now wrap!\n");
dlm_next_cookie = 1;
}
spin_unlock(&dlm_cookie_lock);
@@ -490,7 +492,7 @@
goto error;
}
if (convert && (flags & LKM_LOCAL)) {
- dlmprintk0("strange LOCAL convert request!\n");
+ mlog(ML_ERROR, "strange LOCAL convert request!\n");
goto error;
}
@@ -500,13 +502,15 @@
/* if converting, must pass in a valid dlm_lock */
lock = lksb->lockid;
if (!lock) {
- dlmerror0("NULL lock pointer in convert request\n");
+ mlog(ML_ERROR, "NULL lock pointer in convert "
+ "request\n");
goto error;
}
res = lock->lockres;
if (!res) {
- dlmerror0("NULL lockres pointer in convert request\n");
+ mlog(ML_ERROR, "NULL lockres pointer in convert "
+ "request\n");
goto error;
}
dlm_lockres_get(res);
@@ -519,11 +523,11 @@
if (lock->lksb != lksb || lock->ast != ast ||
lock->bast != bast || lock->astdata != data) {
status = DLM_BADARGS;
- dlmprintk("ERROR new args: lksb=%p, ast=%p, bast=%p, "
- "astdata=%p\n", lksb, ast, bast, data);
- dlmprintk(" orig args: lksb=%p, ast=%p, bast=%p, "
- "astdata=%p\n", lock->lksb, lock->ast,
- lock->bast, lock->astdata);
+ mlog(ML_ERROR, "new args: lksb=%p, ast=%p, bast=%p, "
+ "astdata=%p\n", lksb, ast, bast, data);
+ mlog(ML_ERROR, "orig args: lksb=%p, ast=%p, bast=%p, "
+ "astdata=%p\n", lock->lksb, lock->ast,
+ lock->bast, lock->astdata);
goto error;
}
retry_convert:
@@ -539,8 +543,8 @@
* and just retry right away. I suspect the reco
* or migration will complete fast enough that
* no waiting will be necessary */
- dlmprintk0("retrying convert with migration/"
- "recovery/in-progress\n");
+ mlog(0, "retrying convert with migration/recovery/"
+ "in-progress\n");
up_read(&dlm->recovery_sem);
yield();
goto retry_convert;
@@ -572,8 +576,8 @@
goto up_error;
}
- dlmprintk("type=%d, flags = 0x%x\n", mode, flags);
- dlmprintk("creating lock: lock=%p res=%p\n", lock, res);
+ mlog(0, "type=%d, flags = 0x%x\n", mode, flags);
+ mlog(0, "creating lock: lock=%p res=%p\n", lock, res);
dlm_lock_attach_lockres(lock, res);
lock->ast = ast;
@@ -582,7 +586,7 @@
retry_lock:
if (flags & LKM_VALBLK) {
- dlmprintk("LKM_VALBLK passed by caller\n");
+ mlog(0, "LKM_VALBLK passed by caller\n");
/* LVB requests for non PR, PW or EX locks are
* ignored. */
@@ -601,8 +605,8 @@
if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
status == DLM_FORWARD) {
- dlmprintk0("retrying lock with migration/"
- "recovery/in progress\n");
+ mlog(0, "retrying lock with migration/"
+ "recovery/in progress\n");
up_read(&dlm->recovery_sem);
yield();
down_read(&dlm->recovery_sem);
Modified: trunk/fs/ocfs2/dlm/dlmmaster.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmmaster.c 2005-04-22 22:04:18 UTC (rev 2167)
+++ trunk/fs/ocfs2/dlm/dlmmaster.c 2005-04-23 01:52:24 UTC (rev 2168)
@@ -47,6 +47,8 @@
#include "dlmapi.h"
#include "dlmcommon.h"
+#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
+#include "cluster/masklog.h"
#ifdef DLM_MLE_DEBUG
static void dlm_dump_mles(dlm_ctxt *dlm);
@@ -304,13 +306,13 @@
spin_lock(&mle->spinlock);
if (!test_bit(idx, mle->node_map))
- dlmprintk("node %u already removed from nodemap!\n", idx);
+ mlog(0, "node %u already removed from nodemap!\n", idx);
else
clear_bit(idx, mle->node_map);
#if 0
if (test_bit(idx, mle->recovery_map))
- dlmprintk("node %u already added to recovery map!\n", idx);
+ mlog(0, "node %u already added to recovery map!\n", idx);
else
set_bit(idx, mle->recovery_map);
#endif
@@ -327,13 +329,13 @@
#if 0
if (test_bit(idx, mle->recovery_map))
- dlmprintk("BUG!!! node up message on node "
- "in recovery (%u)!!!\n", idx);
+ mlog(ML_ERROR, "node up message on node in recovery (%u)!\n",
+ idx);
else
#endif
{
if (test_bit(idx, mle->node_map))
- dlmprintk("node %u already in node map!!!\n", idx);
+ mlog(0, "node %u already in node map!\n", idx);
else
set_bit(idx, mle->node_map);
}
@@ -347,7 +349,7 @@
dlm_master_list_entry *mle;
dlm_ctxt *dlm;
- dlmprintk0("\n");
+ mlog_entry_void();
DLM_ASSERT(kref);
@@ -382,7 +384,8 @@
{
assert_spin_locked(&res->spinlock);
- dlmprintk("setting owner to %u\n", owner);
+ mlog_entry("%.*s, %u\n", res->lockname.len, res->lockname.name, owner);
+
if (owner == dlm->node_num)
atomic_inc(&dlm->local_resources);
else if (owner == DLM_LOCK_RES_OWNER_UNKNOWN)
@@ -423,8 +426,8 @@
* associated with them at init time. */
BUG_ON(!res->lockname.name);
- dlmprintk("destroying lockres %.*s\n", res->lockname.len,
- res->lockname.name);
+ mlog(0, "destroying lockres %.*s\n", res->lockname.len,
+ res->lockname.name);
/* By the time we're ready to blow this guy away, we shouldn't
* be on any lists. */
@@ -544,7 +547,7 @@
namelen = strlen(lockid);
- dlmprintk("get lockres %s (len %d)\n", lockid, namelen);
+ mlog(0, "get lockres %s (len %d)\n", lockid, namelen);
lookup:
spin_lock(&dlm->spinlock);
@@ -552,7 +555,7 @@
if (tmpres) {
spin_unlock(&dlm->spinlock);
- dlmprintk("found in hash!\n");
+ mlog(0, "found in hash!\n");
if (mle)
kfree(mle);
@@ -565,7 +568,7 @@
if (!res) {
spin_unlock(&dlm->spinlock);
- dlmprintk("allocating a new resource\n");
+ mlog(0, "allocating a new resource\n");
/* nothing found and we need to allocate one. */
mle = kmalloc(sizeof(dlm_master_list_entry), GFP_KERNEL);
@@ -581,7 +584,7 @@
goto lookup;
}
- dlmprintk("no lockres found, allocated our own: %p\n", res);
+ mlog(0, "no lockres found, allocated our own: %p\n", res);
if (flags & LKM_LOCAL) {
/* caller knows it's safe to assume it's not mastered elsewhere
@@ -602,8 +605,7 @@
blocked = dlm_find_mle(dlm, &tmpmle, (char *)lockid, namelen);
if (blocked) {
if (tmpmle->type == DLM_MLE_MASTER) {
- dlmprintk0("eek! master entry for nonexistent "
- "lock!\n");
+ mlog(ML_ERROR, "master entry for nonexistent lock!\n");
BUG();
} else if (tmpmle->type == DLM_MLE_MIGRATION) {
/* migration is in progress! */
@@ -659,9 +661,8 @@
dlm_node_iter_init(mle->vote_map, &iter);
while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
ret = dlm_do_master_request(mle, nodenum);
- if (ret < 0) {
- dlmprintk("dlm_do_master_request returned %d\n", ret);
- }
+ if (ret < 0)
+ mlog_errno(ret);
if (mle->master != NM_MAX_NODES) {
/* found a master ! */
break;
@@ -674,14 +675,14 @@
while (ret == -EAGAIN) {
ret = dlm_wait_for_lock_mastery(dlm, res, mle);
if (ret == -EINVAL) {
- dlmprintk0("some error occurred. restarting "
- "lock mastery!\n");
+ mlog(ML_ERROR, "some error occurred. restarting "
+ "lock mastery!\n");
/* TODO: figure out how restart this */
BUG();
}
}
if (ret == 0)
- dlmprintk("lockres mastered by %u\n", res->owner);
+ mlog(0, "lockres mastered by %u\n", res->owner);
/* master is known, detach if not already detached */
dlm_mle_detach_hb_events(dlm, mle);
@@ -723,17 +724,16 @@
/* restart if we hit any errors */
if (mle->error || map_changed) {
if (mle->error) {
- dlmprintk("another node got error %d, restarting\n",
- mle->error);
+ mlog(0, "another node got error %d, restarting\n",
+ mle->error);
mle->error = 0;
}
if (map_changed)
- dlmprintk0("node map changed, restarting\n");
+ mlog(0, "node map changed, restarting\n");
spin_unlock(&mle->spinlock);
tmpret = dlm_restart_lock_mastery(dlm, res, mle);
if (tmpret < 0)
- dlmprintk("dlm_restart_lock_mastery returned %d!\n",
- tmpret);
+ mlog_errno(tmpret);
ret = -EINVAL;
goto leave;
}
@@ -770,9 +770,9 @@
msecs_to_jiffies(5000));
if (ret >= 0 && !atomic_read(&mle->woken)) {
- dlmprintk("timed out during lock mastery: "
- "vote_map=%0lx, response_map=%0lx\n",
- mle->vote_map[0], mle->response_map[0]);
+ mlog(0, "timed out during lock mastery: "
+ "vote_map=%0lx, response_map=%0lx\n",
+ mle->vote_map[0], mle->response_map[0]);
}
/* unless we are aborting, need to recheck and
* maybe sleep again */
@@ -787,19 +787,14 @@
ret = dlm_do_assert_master(dlm, res->lockname.name,
res->lockname.len, mle->vote_map);
if (ret) {
- dlmprintk("dlm_do_assert_master returned %d!\n",
- ret);
+ mlog_errno(ret);
+
tmpret = dlm_restart_lock_mastery(dlm, res, mle);
if (tmpret < 0)
- dlmprintk("dlm_restart_lock_mastery returned "
- "%d!\n", tmpret);
+ mlog_errno(tmpret);
ret = -EINVAL;
goto leave;
}
- if (ret < 0) {
- dlmprintk0("interrupted during lock mastery!\n");
- goto leave;
- }
}
/* set the lockres owner */
@@ -814,8 +809,8 @@
static int dlm_restart_lock_mastery(dlm_ctxt *dlm, dlm_lock_resource *res,
dlm_master_list_entry *mle)
{
- dlmprintk0("something happened such that the whole "
- "master process needs to be restarted!\n");
+ mlog(0, "something happened such that the whole master process needs "
+ "to be restarted!\n");
return 0;
}
@@ -848,7 +843,7 @@
ret = net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
sizeof(request), to, &response);
if (ret < 0) {
- dlmprintk("net_send_message returned %d!\n", ret);
+ mlog_errno(ret);
goto out;
}
@@ -856,29 +851,28 @@
switch (response) {
case DLM_MASTER_RESP_YES:
set_bit(to, mle->response_map);
- // dlmprintk("woot! node %u is the "
+ // mlog(0, "woot! node %u is the "
// "master!\n", to);
mle->master = to;
break;
case DLM_MASTER_RESP_NO:
- // dlmprintk("node %u is not the "
+ // mlog(0, "node %u is not the "
// "master, not in-progress\n", to);
set_bit(to, mle->response_map);
break;
case DLM_MASTER_RESP_MAYBE:
- // dlmprintk("node %u is not the "
+ // mlog(0, "node %u is not the "
// "master, but IS in-progress\n", to);
set_bit(to, mle->response_map);
set_bit(to, mle->maybe_map);
break;
case DLM_MASTER_RESP_ERROR:
- dlmprintk("node %u hit an -ENOMEM! "
- "try everything again\n", to);
+ mlog(0, "node %u hit an -ENOMEM! try everything "
+ "again\n", to);
mle->error = 1;
break;
default:
- dlmprintk("bad response! %u\n",
- response);
+ mlog(0, "bad response! %u\n", response);
ret = -EINVAL;
break;
}
@@ -935,8 +929,8 @@
spin_lock(&res->spinlock);
if (res->state & DLM_LOCK_RES_RECOVERING) {
spin_unlock(&res->spinlock);
- dlmprintk0("returning DLM_MASTER_RESP_ERROR "
- "since res is being recovered\n");
+ mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
+ "being recovered\n");
response = DLM_MASTER_RESP_ERROR;
if (mle)
kfree(mle);
@@ -945,7 +939,7 @@
if (res->owner == dlm->node_num) {
spin_unlock(&res->spinlock);
- // dlmprintk0("this node is the master\n");
+ // mlog(0, "this node is the master\n");
response = DLM_MASTER_RESP_YES;
if (mle)
kfree(mle);
@@ -959,14 +953,14 @@
ret = dlm_dispatch_assert_master(dlm, res, 1,
request->node_idx);
if (ret < 0) {
- dlmerror0("failed to dispatch assert "
- "master work\n");
+ mlog(ML_ERROR, "failed to dispatch assert "
+ "master work\n");
response = DLM_MASTER_RESP_ERROR;
}
goto send_response;
} else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
spin_unlock(&res->spinlock);
- // dlmprintk("node %u is the master\n", res->owner);
+ // mlog(0, "node %u is the master\n", res->owner);
response = DLM_MASTER_RESP_NO;
if (mle)
kfree(mle);
@@ -977,40 +971,38 @@
* being blocked, or it is actively trying to
* master this lock. */
if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
- dlmprintk0("bug! lock with no owner should be "
- "in-progress!\n");
+ mlog(ML_ERROR, "lock with no owner should be "
+ "in-progress!\n");
BUG();
}
- // dlmprintk0("lockres is in progress...\n");
+ // mlog(0, "lockres is in progress...\n");
spin_lock(&dlm->master_lock);
found = dlm_find_mle(dlm, &tmpmle, name, namelen);
if (!found) {
- dlmprintk0("bug bug bug!!! "
- "no mle found for this lock!\n");
+ mlog(ML_ERROR, "no mle found for this lock!\n");
BUG();
}
spin_lock(&tmpmle->spinlock);
if (tmpmle->type == DLM_MLE_BLOCK) {
- // dlmprintk0("this node is waiting for "
+ // mlog(0, "this node is waiting for "
// "lockres to be mastered\n");
response = DLM_MASTER_RESP_NO;
} else if (tmpmle->type == DLM_MLE_MIGRATION) {
- dlmprintk("aha! node %u is master, but trying "
- "to migrate to node %u.\n",
- tmpmle->master, tmpmle->new_master);
+ mlog(0, "node %u is master, but trying to migrate to "
+ "node %u.\n", tmpmle->master, tmpmle->new_master);
if (tmpmle->master == dlm->node_num) {
response = DLM_MASTER_RESP_YES;
- dlmprintk("no owner on lockres, but this node "
- "is trying to migrate it to %u?!\n",
- tmpmle->new_master);
+ mlog(ML_ERROR, "no owner on lockres, but this "
+ "node is trying to migrate it to %u?!\n",
+ tmpmle->new_master);
BUG();
} else {
/* the real master can respond on its own */
response = DLM_MASTER_RESP_NO;
}
} else {
- // dlmprintk0("this node is attempting to "
+ // mlog(0, "this node is attempting to "
// "master lockres\n");
response = DLM_MASTER_RESP_MAYBE;
}
@@ -1037,7 +1029,7 @@
found = dlm_find_mle(dlm, &tmpmle, name, namelen);
if (!found) {
/* this lockid has never been seen on this node yet */
- // dlmprintk0("no mle found\n");
+ // mlog(0, "no mle found\n");
if (!mle) {
spin_unlock(&dlm->master_lock);
spin_unlock(&dlm->spinlock);
@@ -1056,22 +1048,22 @@
goto way_up_top;
}
- // dlmprintk0("this is second time thru, already allocated, "
+ // mlog(0, "this is second time thru, already allocated, "
// "add the block.\n");
set_bit(request->node_idx, mle->maybe_map);
list_add(&mle->list, &dlm->master_list);
response = DLM_MASTER_RESP_NO;
} else {
- // dlmprintk0("mle was found\n");
+ // mlog(0, "mle was found\n");
spin_lock(&tmpmle->spinlock);
if (tmpmle->type == DLM_MLE_BLOCK)
response = DLM_MASTER_RESP_NO;
else if (tmpmle->type == DLM_MLE_MIGRATION) {
- dlmprintk("migration mle was found (%u->%u)\n",
- tmpmle->master, tmpmle->new_master);
+ mlog(0, "migration mle was found (%u->%u)\n",
+ tmpmle->master, tmpmle->new_master);
if (tmpmle->master == dlm->node_num) {
- dlmprintk0("no lockres, but migration mle "
- "says that this node is master!\n");
+ mlog(ML_ERROR, "no lockres, but migration mle "
+ "says that this node is master!\n");
BUG();
}
/* real master can respond on its own */
@@ -1120,7 +1112,7 @@
dlm_node_iter_init(nodemap, &iter);
while ((to = dlm_node_iter_next(&iter)) >= 0) {
int r = 0;
- // dlmprintk("sending assert master to %d\n", to);
+ // mlog(0, "sending assert master to %d\n", to);
memset(&assert, 0, sizeof(assert));
assert.node_idx = dlm->node_num;
assert.namelen = namelen;
@@ -1131,15 +1123,14 @@
&assert, sizeof(assert), to, &r);
if (tmpret < 0) {
// TODO
- // dlmprintk("assert_master returned %d!\n", tmpret);
+ // mlog(0, "assert_master returned %d!\n", tmpret);
ret = tmpret;
break;
} else if (r < 0) {
/* nothing returns this yet */
/* ok, something horribly messed. kill thyself. */
- dlmprintk("during assert master of %.*s to %u, "
- "got %d. BYE BYE!\n",
- namelen, lockname, to, r);
+ mlog(ML_ERROR,"during assert master of %.*s to %u, "
+ "got %d.\n", namelen, lockname, to, r);
BUG();
}
}
@@ -1173,7 +1164,7 @@
namelen = assert->namelen;
if (namelen > DLM_LOCKID_NAME_MAX) {
- printk("Invalid name length in master assert handler!\n");
+ mlog(ML_ERROR, "Invalid name length!");
goto done;
}
@@ -1182,18 +1173,18 @@
/* find the MLE */
spin_lock(&dlm->master_lock);
if (!dlm_find_mle(dlm, &mle, name, namelen)) {
- dlmprintk("just got an assert_master from %u, but no "
- "MLE for it!\n", assert->node_idx);
+ mlog(0, "just got an assert_master from %u, but no "
+ "MLE for it!\n", assert->node_idx);
} else {
int bit = find_next_bit (mle->maybe_map, NM_MAX_NODES, 0);
if (bit >= NM_MAX_NODES) {
- dlmprintk("EEK! no bits set in the maybe_map, but %u "
- "is asserting!\n", assert->node_idx);
+ mlog(ML_ERROR, "no bits set in the maybe_map, but %u "
+ "is asserting!\n", assert->node_idx);
BUG();
} else if (bit != assert->node_idx) {
/* TODO: is this ok? */
- dlmprintk("EEK! expected %u to be the master, but %u "
- "is asserting!\n", bit, assert->node_idx);
+ mlog(ML_ERROR, "expected %u to be the master, but %u "
+ "is asserting!\n", bit, assert->node_idx);
BUG();
}
}
@@ -1207,33 +1198,31 @@
DLM_ASSERT(!(res->state & DLM_LOCK_RES_RECOVERING));
if (!mle) {
if (res->owner != assert->node_idx) {
- dlmprintk("EEEEeeEEeeEEEK! assert_master from "
+ mlog(ML_ERROR, "assert_master from "
"%u, but current owner is %u!\n",
assert->node_idx, res->owner);
BUG();
}
} else if (mle->type != DLM_MLE_MIGRATION) {
if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
- dlmprintk("EEEEEEEEEEEEEEEEEK!!! got "
- "assert_master from node %u, but %u "
- "is the owner!\n", assert->node_idx,
- res->owner);
- dlmprintk0("goodnite!\n");
+ mlog(ML_ERROR, "got assert_master from "
+ "node %u, but %u is the owner!\n",
+ assert->node_idx, res->owner);
BUG();
}
if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
- dlmprintk("bug! got assert from %u, but lock "
- "with no owner should be "
- "in-progress!\n", assert->node_idx);
+ mlog(ML_ERROR, "got assert from %u, but lock "
+ "with no owner should be "
+ "in-progress!\n", assert->node_idx);
BUG();
}
} else /* mle->type == DLM_MLE_MIGRATION */ {
/* should only be getting an assert from new master */
if (assert->node_idx != mle->new_master) {
- dlmprintk("migration: got assert from %u, but "
- "new master is %u, and old master "
- "was %u\n", assert->node_idx,
- mle->new_master, mle->master);
+ mlog(ML_ERROR, "got assert from %u, but "
+ "new master is %u, and old master "
+ "was %u\n", assert->node_idx,
+ mle->new_master, mle->master);
BUG();
}
@@ -1242,7 +1231,7 @@
}
spin_unlock(&dlm->spinlock);
- // dlmprintk("woo! got an assert_master from node %u!\n",
+ // mlog(0, "woo! got an assert_master from node %u!\n",
// assert->node_idx);
if (mle) {
spin_lock(&mle->spinlock);
@@ -1252,7 +1241,7 @@
spin_unlock(&mle->spinlock);
if (mle->type == DLM_MLE_MIGRATION && res) {
- dlmprintk0("finishing off migration of lockres\n");
+ mlog(0, "finishing off migration of lockres\n");
spin_lock(&res->spinlock);
res->state &= ~DLM_LOCK_RES_MIGRATING;
dlm_change_lockres_owner(dlm, res, mle->new_master);
@@ -1346,13 +1335,13 @@
if (ret < 0) {
/* no choice but to try again.
* maybe a node died. */
- dlmerror("assert master returned %d!\n", ret);
+ mlog_errno(ret);
}
} while (ret < 0);
dlm_lockres_put(res);
- dlmprintk0("finished with dlm_assert_master_worker\n");
+ mlog(0, "finished with dlm_assert_master_worker\n");
}
@@ -1381,23 +1370,23 @@
name = res->lockname.name;
namelen = res->lockname.len;
- dlmprintk("migrating %.*s to %u\n", namelen, name, target);
+ mlog(0, "migrating %.*s to %u\n", namelen, name, target);
/*
* ensure this lockres is a proper candidate for migration
*/
spin_lock(&res->spinlock);
if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
- dlmprintk0("cannot migrate lockres with unknown owner!\n");
+ mlog(0, "cannot migrate lockres with unknown owner!\n");
spin_unlock(&res->spinlock);
goto leave;
}
if (res->owner != dlm->node_num) {
- dlmprintk0("cannot migrate lockres this node doesn't own!\n");
+ mlog(0, "cannot migrate lockres this node doesn't own!\n");
spin_unlock(&res->spinlock);
goto leave;
}
- dlmprintk0("checking queues...\n");
+ mlog(0, "checking queues...\n");
queue = &res->granted;
for (i=0; i<3; i++) {
list_for_each(iter, queue) {
@@ -1405,11 +1394,11 @@
DLM_ASSERT(lock);
empty = 0;
if (lock->ml.node == dlm->node_num) {
- dlmprintk("found a lock owned by this node "
- "still on the %s queue! will not "
- "migrate this lockres\n",
- i==0 ? "granted" :
- (i==1 ? "converting" : "blocked"));
+ mlog(0, "found a lock owned by this node "
+ "still on the %s queue! will not "
+ "migrate this lockres\n",
+ i==0 ? "granted" :
+ (i==1 ? "converting" : "blocked"));
spin_unlock(&res->spinlock);
ret = -ENOTEMPTY;
goto leave;
@@ -1417,12 +1406,12 @@
}
queue++;
}
- dlmprintk0("all locks on this lockres are nonlocal. continuing\n");
+ mlog(0, "all locks on this lockres are nonlocal. continuing\n");
spin_unlock(&res->spinlock);
/* no work to do */
if (empty) {
- dlmprintk0("no locks were found on this lockres! done!\n");
+ mlog(0, "no locks were found on this lockres! done!\n");
ret = 0;
goto leave;
}
@@ -1435,7 +1424,7 @@
ret = -ENOMEM;
mres = (dlm_migratable_lockres *) __get_free_page(GFP_KERNEL);
if (!mres) {
- dlmprintk0("failed to get free page!\n");
+ mlog_errno(ret);
goto leave;
}
@@ -1450,14 +1439,14 @@
* find a node to migrate the lockres to
*/
- dlmprintk0("picking a migration node\n");
+ mlog(0, "picking a migration node\n");
spin_lock(&dlm->spinlock);
/* pick a new node */
if (!test_bit(target, dlm->domain_map) ||
target >= NM_MAX_NODES) {
target = dlm_pick_migration_target(dlm, res);
}
- dlmprintk("node %u chosen for migration\n", target);
+ mlog(0, "node %u chosen for migration\n", target);
if (target >= NM_MAX_NODES ||
!test_bit(target, dlm->domain_map)) {
@@ -1470,7 +1459,7 @@
goto fail;
}
- dlmprintk("continuing with target = %u\n", target);
+ mlog(0, "continuing with target = %u\n", target);
/*
* clear any existing master requests and
@@ -1483,7 +1472,7 @@
spin_unlock(&dlm->spinlock);
if (ret == -EEXIST) {
- dlmprintk0("another process is already migrating it\n");
+ mlog(0, "another process is already migrating it\n");
goto fail;
}
mle_added = 1;
@@ -1530,8 +1519,8 @@
DLM_MRES_MIGRATION);
if (ret < 0) {
- dlmprintk("migration to node %u failed with %d\n",
- target, ret);
+ mlog(0, "migration to node %u failed with %d\n",
+ target, ret);
/* migration failed, detach and clean up mle */
dlm_mle_detach_hb_events(dlm, mle);
dlm_put_mle(mle);
@@ -1561,7 +1550,7 @@
res->owner == target)
break;
- dlmprintk0("timed out during migration\n");
+ mlog(0, "timed out during migration\n");
}
if (ret == -ERESTARTSYS) {
/* migration failed, detach and clean up mle */
@@ -1592,7 +1581,8 @@
free_page((unsigned long)mres);
dlm_put(dlm);
- dlmprintk("woo. returning %d\n", ret);
+
+ mlog(0, "returning %d\n", ret);
return ret;
}
EXPORT_SYMBOL(dlm_migrate_lockres);
@@ -1650,8 +1640,8 @@
lock = list_entry (iter, dlm_lock, list);
DLM_ASSERT(lock);
if (lock->ml.node != dlm->node_num) {
- dlmprintk("putting lock for node %u\n",
- lock->ml.node);
+ mlog(0, "putting lock for node %u\n",
+ lock->ml.node);
/* be extra careful */
DLM_ASSERT(list_empty(&lock->ast_list));
DLM_ASSERT(list_empty(&lock->bast_list));
@@ -1693,24 +1683,23 @@
queue++;
}
spin_unlock(&res->spinlock);
- dlmprintk0("have not found a suitable target yet! "
- "checking domain map\n");
+ mlog(0, "have not found a suitable target yet! checking domain map\n");
/* ok now we're getting desperate. pick anyone alive. */
nodenum = -1;
while (1) {
nodenum = find_next_bit(dlm->domain_map,
NM_MAX_NODES, nodenum+1);
- dlmprintk("found %d in domain map\n", nodenum);
+ mlog(0, "found %d in domain map\n", nodenum);
if (nodenum >= NM_MAX_NODES)
break;
if (nodenum != dlm->node_num) {
- dlmprintk("aha. picking %d\n", nodenum);
+ mlog(0, "picking %d\n", nodenum);
return nodenum;
}
}
- dlmprintk0("giving up. no master to migrate to\n");
+ mlog(0, "giving up. no master to migrate to\n");
return DLM_LOCK_RES_OWNER_UNKNOWN;
}
@@ -1744,17 +1733,18 @@
ret = net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
&migrate, sizeof(migrate), nodenum, &status);
if (ret < 0)
- dlmprintk("net_send_message returned %d!\n", ret);
+ mlog_errno(ret);
else if (status < 0) {
- dlmprintk("migrate request (node %u) returned %d!\n",
- nodenum, status);
+ mlog(0, "migrate request (node %u) returned %d!\n",
+ nodenum, status);
ret = status;
}
}
if (ret < 0)
- dlmprintk("nasty error occurred. %d\n", ret);
- dlmprintk("returning ret=%d\n", ret);
+ mlog_errno(ret);
+
+ mlog(0, "returning ret=%d\n", ret);
return ret;
}
@@ -1802,8 +1792,8 @@
* a migrate request from a node that we now see as
* dead. what can we do here? drop it to the floor? */
spin_unlock(&res->spinlock);
- dlmprintk0("grrrr. got a migrate request, but the "
- "lockres is marked as recovering!");
+ mlog(ML_ERROR, "Got a migrate request, but the "
+ "lockres is marked as recovering!");
kfree(mle);
ret = -EINVAL; /* need a better solution */
goto unlock;
@@ -1854,7 +1844,7 @@
*oldmle = NULL;
- dlmprintk0("\n");
+ mlog_entry_void();
assert_spin_locked(&dlm->spinlock);
assert_spin_locked(&dlm->master_lock);
@@ -1867,19 +1857,19 @@
if (tmp->type == DLM_MLE_MIGRATION) {
if (master == dlm->node_num) {
/* ah another process raced me to it */
- dlmprintk("tried to migrate %.*s, but some "
- "process beat me to it\n",
- namelen, name);
+ mlog(0, "tried to migrate %.*s, but some "
+ "process beat me to it\n",
+ namelen, name);
ret = -EEXIST;
} else {
/* bad. 2 NODES are trying to migrate! */
- dlmerror("migration error. mle: master=%u "
- "new_master=%u // request: "
- "master=%u new_master=%u // "
- "lockres=%.*s\n",
- tmp->master, tmp->new_master,
- master, new_master,
- namelen, name);
+ mlog(ML_ERROR, "migration error mle: "
+ "master=%u new_master=%u // request: "
+ "master=%u new_master=%u // "
+ "lockres=%.*s\n",
+ tmp->master, tmp->new_master,
+ master, new_master,
+ namelen, name);
BUG();
}
} else {
@@ -1963,9 +1953,9 @@
wake_up(&mle->wq);
if (mle->type == DLM_MLE_MIGRATION) {
- dlmprintk("node %u died during migration from "
- "%u to %u!\n", dead_node,
- mle->master, mle->new_master);
+ mlog(0, "node %u died during migration from "
+ "%u to %u!\n", dead_node,
+ mle->master, mle->new_master);
/* if there is a lockres associated with this
* mle, find it and set its owner to UNKNOWN */
res = __dlm_lookup_lockres(dlm, mle->u.name.name,
@@ -2014,22 +2004,21 @@
clear_bit(dlm->node_num, iter.node_map);
spin_unlock(&dlm->spinlock);
- dlmprintk0("now time to do a migrate request to other nodes\n");
+ mlog(0, "now time to do a migrate request to other nodes\n");
ret = dlm_do_migrate_request(dlm, res, old_master,
dlm->node_num, &iter);
if (ret < 0) {
- dlmprintk("error %d\n", ret);
+ mlog_errno(ret);
goto leave;
}
retry:
- dlmprintk0("doing assert master to all except the original node\n");
+ mlog(0, "doing assert master to all except the original node\n");
ret = dlm_do_assert_master(dlm, res->lockname.name,
res->lockname.len, iter.node_map);
if (ret < 0) {
- dlmprintk("bad news. assert master returned %d "
- "while trying to finish migration. retry?\n",
- ret);
+ mlog_errno(ret);
+
/* maybe we can be saved by updating the domain map */
spin_lock(&dlm->spinlock);
dlm_node_iter_init(dlm->domain_map, &iter);
@@ -2041,12 +2030,12 @@
memset(iter.node_map, 0, sizeof(iter.node_map));
set_bit(old_master, iter.node_map);
- dlmprintk("doing assert master back to %u\n", old_master);
+ mlog(0, "doing assert master back to %u\n", old_master);
ret = dlm_do_assert_master(dlm, res->lockname.name,
res->lockname.len, iter.node_map);
if (ret < 0) {
- dlmprintk("assert master to original master failed "
- "with %d.\n", ret);
+ mlog(0, "assert master to original master failed "
+ "with %d.\n", ret);
/* the only nonzero status here would be because of
* a dead original node. we're done. */
}
Modified: trunk/fs/ocfs2/dlm/dlmrecovery.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmrecovery.c 2005-04-22 22:04:18 UTC (rev 2167)
+++ trunk/fs/ocfs2/dlm/dlmrecovery.c 2005-04-23 01:52:24 UTC (rev 2168)
@@ -48,6 +48,9 @@
#include "dlmapi.h"
#include "dlmcommon.h"
+#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
+#include "cluster/masklog.h"
+
static void dlm_do_local_recovery_cleanup(dlm_ctxt *dlm, u8 dead_node);
static int dlm_recovery_thread(void *data);
@@ -172,14 +175,13 @@
/* Launch the recovery thread */
int dlm_launch_recovery_thread(dlm_ctxt *dlm)
{
- dlmprintk0("starting dlm recovery thread...\n");
+ mlog(0, "starting dlm recovery thread...\n");
dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
"dlm_reco_thread");
if (IS_ERR(dlm->dlm_reco_thread_task)) {
+ mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
dlm->dlm_reco_thread_task = NULL;
- dlmprintk("unable to launch dlm recovery thread, error=%ld",
- PTR_ERR(dlm->dlm_reco_thread_task));
return -EINVAL;
}
@@ -189,7 +191,7 @@
void dlm_complete_recovery_thread(dlm_ctxt *dlm)
{
if (dlm->dlm_reco_thread_task) {
- dlmprintk0("waiting for dlm recovery thread to exit\n");
+ mlog(0, "waiting for dlm recovery thread to exit\n");
kthread_stop(dlm->dlm_reco_thread_task);
dlm->dlm_reco_thread_task = NULL;
}
@@ -229,7 +231,7 @@
dlm_ctxt *dlm = data;
unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);
- dlmprintk("dlm thread running for %s...\n", dlm->name);
+ mlog(0, "dlm thread running for %s...\n", dlm->name);
while (!kthread_should_stop()) {
status = dlm_do_recovery(dlm);
@@ -238,13 +240,14 @@
continue;
}
if (status < 0)
- dlmprintk("dlm_do_recovery returned %d\n", status);
+ mlog_errno(status);
+
wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
kthread_should_stop(),
timeout);
}
- dlmprintk0("quitting DLM recovery thread\n");
+ mlog(0, "quitting DLM recovery thread\n");
return 0;
}
@@ -258,8 +261,8 @@
/* check to see if the new master has died */
if (dlm->reco.new_master != NM_INVALID_SLOT_NUM &&
test_bit(dlm->reco.new_master, dlm->recovery_map)) {
- dlmprintk("new master %u died while recovering %u!\n",
- dlm->reco.new_master, dlm->reco.dead_node);
+ mlog(0, "new master %u died while recovering %u!\n",
+ dlm->reco.new_master, dlm->reco.dead_node);
/* unset the new_master, leave dead_node */
dlm->reco.new_master = NM_INVALID_SLOT_NUM;
}
@@ -275,19 +278,19 @@
dlm->reco.dead_node = bit;
} else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
/* BUG? */
- dlmprintk("dead_node %u no longer in recovery map!\n",
- dlm->reco.dead_node);
+ mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
+ dlm->reco.dead_node);
dlm->reco.dead_node = NM_INVALID_SLOT_NUM;
}
if (dlm->reco.dead_node == NM_INVALID_SLOT_NUM) {
- // dlmprintk0("nothing to recover! sleeping now!\n");
+ // mlog(0, "nothing to recover! sleeping now!\n");
spin_unlock(&dlm->spinlock);
/* return to main thread loop and sleep. */
return 0;
}
- dlmprintk("recovery thread found node %u in the recovery map!\n",
- dlm->reco.dead_node);
+ mlog(0, "recovery thread found node %u in the recovery map!\n",
+ dlm->reco.dead_node);
spin_unlock(&dlm->spinlock);
/* take write barrier */
@@ -304,12 +307,10 @@
dlm->reco.new_master = dlm->node_num;
goto master_here;
}
- dlmprintk0("another node will master this "
- "recovery session. wait.\n");
+ mlog(0, "another node will master this recovery session.\n");
} else {
- dlmprintk("RECOVERY! new_master=%u, this node=%u, "
- "dead_node=%u\n", dlm->reco.new_master,
- dlm->node_num, dlm->reco.dead_node);
+ mlog(0, "new_master=%u, this node=%u, dead_node=%u\n",
+ dlm->reco.new_master, dlm->node_num, dlm->reco.dead_node);
}
/* it is safe to start everything back up here
@@ -321,13 +322,12 @@
return 0;
master_here:
- dlmprintk("RECOVERY! mastering recovery of %u HERE!\n",
- dlm->reco.dead_node);
+ mlog(0, "mastering recovery of %u here!\n", dlm->reco.dead_node);
status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
if (status < 0) {
- dlmprintk("error remastering locks for node %u!!!! "
- "retrying!\n", dlm->reco.dead_node);
+ mlog(ML_ERROR, "error %d remastering locks for node %u, "
+ "retrying.\n", status, dlm->reco.dead_node);
} else {
/* success! see if any other nodes need recovery */
dlm_reset_recovery(dlm);
@@ -365,8 +365,8 @@
DLM_ASSERT(ndata->state == DLM_RECO_NODE_DATA_INIT);
ndata->state = DLM_RECO_NODE_DATA_REQUESTING;
- dlmprintk("requesting lock info from node %u\n",
- ndata->node_num);
+ mlog(0, "requesting lock info from node %u\n",
+ ndata->node_num);
if (ndata->node_num == dlm->node_num) {
ndata->state = DLM_RECO_NODE_DATA_DONE;
@@ -386,29 +386,33 @@
DLM_ASSERT(0);
break;
case DLM_RECO_NODE_DATA_DEAD:
- dlmprintk("eek. node %u died after requesting recovery info for node %u\n",
- ndata->node_num, dead_node);
+ mlog(0, "node %u died after requesting "
+ "recovery info for node %u\n",
+ ndata->node_num, dead_node);
// start all over
destroy = 1;
status = -EAGAIN;
goto leave;
case DLM_RECO_NODE_DATA_REQUESTING:
ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
- dlmprintk("now receiving recovery data from node %u for dead node %u\n",
- ndata->node_num, dead_node);
+ mlog(0, "now receiving recovery data from "
+ "node %u for dead node %u\n",
+ ndata->node_num, dead_node);
break;
case DLM_RECO_NODE_DATA_RECEIVING:
- dlmprintk("already receiving recovery data from node %u for dead node %u\n",
- ndata->node_num, dead_node);
+ mlog(0, "already receiving recovery data from "
+ "node %u for dead node %u\n",
+ ndata->node_num, dead_node);
break;
case DLM_RECO_NODE_DATA_DONE:
- dlmprintk("already DONE receiving recovery data from node %u for dead node %u\n",
- ndata->node_num, dead_node);
+ mlog(0, "already DONE receiving recovery data "
+ "from node %u for dead node %u\n",
+ ndata->node_num, dead_node);
break;
}
}
- dlmprintk0("done requesting all lock info\n");
+ mlog(0, "done requesting all lock info\n");
/* nodes should be sending reco data now
* just need to wait */
@@ -420,21 +424,22 @@
spin_lock(&dlm_reco_state_lock);
list_for_each(iter, &dlm->reco.node_data) {
ndata = list_entry (iter, dlm_reco_node_data, list);
-
- dlmprintk("checking reco state of node %u\n",
- ndata->node_num);
+
+ mlog(0, "checking recovery state of node %u\n",
+ ndata->node_num);
switch (ndata->state) {
case DLM_RECO_NODE_DATA_INIT:
case DLM_RECO_NODE_DATA_REQUESTING:
- dlmprintk("bad ndata state for node %u:"
- " state=%d\n",
- ndata->node_num,
- ndata->state);
+ mlog(ML_ERROR, "bad ndata state for "
+ "node %u: state=%d\n",
+ ndata->node_num, ndata->state);
BUG();
break;
case DLM_RECO_NODE_DATA_DEAD:
- dlmprintk("eek. node %u died after requesting recovery info for node %u\n",
- ndata->node_num, dead_node);
+ mlog(0, "node %u died after "
+ "requesting recovery info for "
+ "node %u\n", ndata->node_num,
+ dead_node);
spin_unlock(&dlm_reco_state_lock);
// start all over
destroy = 1;
@@ -452,25 +457,24 @@
}
spin_unlock(&dlm_reco_state_lock);
- dlmprintk("pass #%d, all_nodes_done?: %s\n",
- ++pass, all_nodes_done?"yes":"no");
+ mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
+ all_nodes_done?"yes":"no");
if (all_nodes_done) {
int ret;
/* all nodes are now in DLM_RECO_NODE_DATA_DONE state
* just send a finalize message to everyone and
* clean up */
- dlmprintk0("all nodes are done! send finalize\n");
+ mlog(0, "all nodes are done! send finalize\n");
ret = dlm_send_finalize_reco_message(dlm);
- if (ret < 0) {
- dlmprintk("dlm_send_finalize_reco_message "
- "returned %d\n", ret);
- }
+ if (ret < 0)
+ mlog_errno(ret);
+
spin_lock(&dlm->spinlock);
dlm_finish_local_lockres_recovery(dlm, dead_node,
dlm->node_num);
spin_unlock(&dlm->spinlock);
- dlmprintk0("should be done with recovery!\n");
+ mlog(0, "should be done with recovery!\n");
destroy = 1;
status = ret;
break;
@@ -486,7 +490,8 @@
leave:
if (destroy)
dlm_destroy_recovery_area(dlm, dead_node);
- dlmprintk("returning status=%d\n", status);
+
+ mlog_exit(status);
return status;
}
@@ -547,10 +552,10 @@
dlm_lock_request lr;
dlm_status ret;
- dlmprintk0("\n");
+ mlog(0, "\n");
- dlmprintk("dlm_request_all_locks: dead node is %u, sending request "
+ mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
"to %u\n", dead_node, request_from);
memset(&lr, 0, sizeof(lr));
@@ -564,7 +569,7 @@
&lr, sizeof(lr),
request_from, NULL);
if (ret < 0)
- dlmprintk("error occurred in net_send_message: %d\n", ret);
+ mlog_errno(ret);
// return from here, then
// sleep until all received or error
@@ -651,10 +656,8 @@
res = list_entry (iter, dlm_lock_resource, recovering);
ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
DLM_MRES_RECOVERY);
- if (ret < 0) {
- dlmprintk("send_one_lockres returned %d\n",
- ret);
- }
+ if (ret < 0)
+ mlog_errno(ret);
}
/* move the resources back to the list */
@@ -664,8 +667,7 @@
ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
if (ret < 0)
- dlmprintk("recovery data-done message "
- "returned %d\n", ret);
+ mlog_errno(ret);
free_page((unsigned long)data);
}
@@ -721,15 +723,14 @@
case DLM_RECO_NODE_DATA_DEAD:
case DLM_RECO_NODE_DATA_DONE:
case DLM_RECO_NODE_DATA_FINALIZE_SENT:
- dlmprintk("bad ndata state for node %u:"
- " state=%d\n",
- ndata->node_num,
- ndata->state);
+ mlog(ML_ERROR, "bad ndata state for node %u:"
+ " state=%d\n", ndata->node_num,
+ ndata->state);
BUG();
break;
case DLM_RECO_NODE_DATA_RECEIVING:
case DLM_RECO_NODE_DATA_REQUESTED:
- dlmprintk("node %u is DONE sending "
+ mlog(0, "node %u is DONE sending "
"recovery data!\n",
ndata->node_num);
ndata->state = DLM_RECO_NODE_DATA_DONE;
@@ -744,8 +745,8 @@
dlm_kick_recovery_thread(dlm);
if (ret < 0)
- dlmprintk("failed to find recovery node data for node %u\n",
- done->node_idx);
+ mlog(ML_ERROR, "failed to find recovery node data for node "
+ "%u\n", done->node_idx);
dlm_put(dlm);
return ret;
}
@@ -763,13 +764,13 @@
res->lockname.len))
continue;
if (res->owner == dead_node) {
- dlmprintk("found lockres owned by dead node while "
+ mlog(0, "found lockres owned by dead node while "
"doing recovery for node %u. sending it.\n",
dead_node);
list_del_init(&res->recovering);
list_add_tail(&res->recovering, list);
} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
- dlmprintk("found UNKNOWN owner while doing recovery "
+ mlog(0, "found UNKNOWN owner while doing recovery "
"for node %u. sending it.\n", dead_node);
list_del_init(&res->recovering);
list_add_tail(&res->recovering, list);
@@ -825,15 +826,16 @@
ret = net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
sz, send_to, &status);
if (ret < 0) {
- dlmerror("net_send_message returned %d\n", ret);
+ mlog_errno(ret);
} else {
/* might get an -ENOMEM back here */
ret = status;
if (ret < 0) {
- dlmerror("migrate lockres got status=%d\n", ret);
+ mlog_errno(ret);
+
if (ret == -EFAULT) {
- dlmerror("node %u told me to kill myself!\n",
- send_to);
+ mlog(ML_ERROR, "node %u told me to kill "
+ "myself!\n", send_to);
BUG();
}
}
@@ -912,12 +914,12 @@
DLM_ASSERT(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION));
- dlmprintk("sending to %u\n", send_to);
+ mlog(0, "sending to %u\n", send_to);
total_locks = dlm_num_locks_in_lockres(res);
if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
/* rare, but possible */
- dlmprintk("argh. lockres has %d locks. this will "
+ mlog(0, "argh. lockres has %d locks. this will "
"require more than one network packet to "
"migrate\n", total_locks);
mig_cookie = dlm_get_next_mig_cookie();
@@ -993,11 +995,11 @@
DLM_ASSERT(mres->flags & DLM_MRES_RECOVERY);
}
- dlmprintk("%s message received from node %u\n",
+ mlog(0, "%s message received from node %u\n",
(mres->flags & DLM_MRES_RECOVERY) ?
"recovery" : "migration", mres->master);
if (mres->flags & DLM_MRES_ALL_DONE)
- dlmprintk0("all done flag. all lockres data received!\n");
+ mlog(0, "all done flag. all lockres data received!\n");
ret = -ENOMEM;
buf = kmalloc(msg->data_len, GFP_KERNEL);
@@ -1019,14 +1021,14 @@
if (res->state & DLM_LOCK_RES_MIGRATING) {
/* this is at least the second
* lockres message */
- dlmprintk("lock %.*s is already migrating\n",
+ mlog(0, "lock %.*s is already migrating\n",
mres->lockname_len,
mres->lockname);
} else if (res->state & DLM_LOCK_RES_RECOVERING) {
/* caller should BUG */
- dlmerror("node is attempting to migrate lock "
- "%.*s, but marked as recovering!\n",
- mres->lockname_len, mres->lockname);
+ mlog(ML_ERROR, "node is attempting to migrate "
+ "lock %.*s, but marked as recovering!\n",
+ mres->lockname_len, mres->lockname);
ret = -EFAULT;
spin_unlock(&res->spinlock);
goto leave;
@@ -1063,7 +1065,7 @@
if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
/* migration cannot have an unknown master */
DLM_ASSERT(mres->flags & DLM_MRES_RECOVERY);
- dlmprintk("recovery has passed me a lockres with an "
+ mlog(0, "recovery has passed me a lockres with an "
"unknown owner.. will need to requery: "
"%.*s\n", mres->lockname_len, mres->lockname);
} else {
@@ -1092,7 +1094,8 @@
if (item)
kfree(item);
}
- dlmprintk("returning ret=%d\n", ret);
+
+ mlog_exit(ret);
return ret;
}
@@ -1122,16 +1125,16 @@
again:
ret = dlm_lockres_master_requery(dlm, res, &real_master);
if (ret < 0) {
- dlmprintk("ugh. awful place to fail. ret=%d\n",
+ mlog(0, "dlm_lockres_master_requery failure: %d\n",
ret);
goto again;
}
if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
- dlmprintk("lockres %.*s not claimed. "
+ mlog(0, "lockres %.*s not claimed. "
"this node will take it.\n",
res->lockname.len, res->lockname.name);
} else {
- dlmprintk("master needs to respond to sender "
+ mlog(0, "master needs to respond to sender "
"that node %u still owns %.*s\n",
real_master, res->lockname.len,
res->lockname.name);
@@ -1142,20 +1145,20 @@
ret = dlm_process_recovery_data(dlm, res, mres);
if (ret < 0)
- dlmprintk("dlm_process_recovery_data returned %d\n", ret);
+ mlog(0, "dlm_process_recovery_data returned %d\n", ret);
else
- dlmprintk0("woo dlm_process_recovery_data succeeded\n");
+ mlog(0, "dlm_process_recovery_data succeeded\n");
if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
(DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
ret = dlm_finish_migration(dlm, res, mres->master);
if (ret < 0)
- dlmprintk("finish migration returned %d\n", ret);
+ mlog_errno(ret);
}
leave:
kfree(data);
- dlmprintk("ret=%d\n", ret);
+ mlog_exit(ret);
}
@@ -1199,13 +1202,12 @@
while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
if (ret < 0) {
- dlmprintk("ugh. bad place to fail. ret=%d\n", ret);
+ mlog_errno(ret);
BUG();
/* TODO: need to figure a way to restart this */
}
if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
- dlmprintk("aha! lock master is %u\n",
- *real_master);
+ mlog(0, "lock master is %u\n", *real_master);
break;
}
}
@@ -1229,12 +1231,12 @@
ret = net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
&req, sizeof(req), nodenum, &status);
if (ret < 0)
- dlmprintk("net_send_message returned %d!\n", ret);
+ mlog_errno(ret);
else {
DLM_ASSERT(status >= 0);
DLM_ASSERT(status <= DLM_LOCK_RES_OWNER_UNKNOWN);
*real_master = (u8) (status & 0xff);
- dlmprintk("node %u responded to master requery with %u\n",
+ mlog(0, "node %u responded to master requery with %u\n",
nodenum, *real_master);
ret = 0;
}
@@ -1268,8 +1270,7 @@
if (master == dlm->node_num) {
int ret = dlm_dispatch_assert_master(dlm, res, 0, 0);
if (ret < 0) {
- dlmerror0("could not allocate enough memory "
- "to send assert_master message!\n");
+ mlog_errno(-ENOMEM);
/* retry!? */
BUG();
}
@@ -1332,7 +1333,7 @@
struct list_head *iter;
dlm_lock *lock = NULL;
- dlmprintk("running %d locks for this lockres\n", mres->num_locks);
+ mlog(0, "running %d locks for this lockres\n", mres->num_locks);
for (i=0; i<mres->num_locks; i++) {
ml = &(mres->ml[i]);
DLM_ASSERT(ml->highest_blocked == LKM_IVMODE);
@@ -1360,8 +1361,8 @@
/* lock is always created locally first, and
* destroyed locally last. it must be on the list */
if (!lock) {
- dlmprintk("could not find local lock with "
- "cookie %llu!\n", ml->cookie);
+ mlog(ML_ERROR, "could not find local lock "
+ "with cookie %llu!\n", ml->cookie);
BUG();
}
DLM_ASSERT(lock->ml.node == ml->node);
@@ -1375,7 +1376,7 @@
list_add_tail(&lock->list, queue);
spin_unlock(&res->spinlock);
- dlmprintk0("just reordered a local lock!\n");
+ mlog(0, "just reordered a local lock!\n");
continue;
}
@@ -1422,16 +1423,16 @@
list_add_tail(&newlock->list, queue);
spin_unlock(&res->spinlock);
}
- dlmprintk0("done running all the locks\n");
+ mlog(0, "done running all the locks\n");
leave:
if (ret < 0) {
- dlmprintk("error occurred while processing recovery "
- "data! %d\n", ret);
+ mlog_errno(ret);
if (newlock)
dlm_lock_put(newlock);
}
- dlmprintk("returning %d\n", ret);
+
+ mlog_exit(ret);
return ret;
}
@@ -1452,7 +1453,7 @@
struct list_head *iter, *iter2;
dlm_lock_resource *res;
- dlmprintk0("\n");
+ mlog_entry_void();
assert_spin_locked(&dlm->spinlock);
@@ -1561,7 +1562,7 @@
/* Clean up join state on node death. */
if (dlm->joining_node == idx) {
- dlmprintk("Clearing join state for node %u\n", idx);
+ mlog(0, "Clearing join state for node %u\n", idx);
__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
}
@@ -1574,15 +1575,15 @@
if (!test_bit(idx, dlm->domain_map)) {
/* This also catches the case that we get a node down
* but haven't joined the domain yet. */
- dlmprintk("node %u already removed from domain!\n", idx);
+ mlog(0, "node %u already removed from domain!\n", idx);
goto bail;
}
- dlmprintk("node %u being removed from domain map!\n", idx);
+ mlog(0, "node %u being removed from domain map!\n", idx);
clear_bit(idx, dlm->domain_map);
if (test_bit(idx, dlm->recovery_map))
- dlmprintk("node %u already added to recovery map!\n", idx);
+ mlog(0, "node %u already added to recovery map!\n", idx);
else {
set_bit(idx, dlm->recovery_map);
dlm_do_local_recovery_cleanup(dlm, idx);
@@ -1636,15 +1637,15 @@
static void dlm_reco_ast(void *astdata)
{
- dlmprintk0("ast for recovery lock fired!\n");
+ mlog(0, "ast for recovery lock fired!\n");
}
static void dlm_reco_bast(void *astdata, int blocked_type)
{
- dlmprintk0("bast for recovery lock fired!\n");
+ mlog(0, "bast for recovery lock fired!\n");
}
static void dlm_reco_unlock_ast(void *astdata, dlm_status st)
{
- dlmprintk0("unlockast for recovery lock fired!\n");
+ mlog(0, "unlockast for recovery lock fired!\n");
}
@@ -1676,11 +1677,11 @@
* is actually "done" and the lock structure is
* even freed. we can continue, but only
* because this specific lock name is special. */
- dlmprintk("ack! dlmunlock returned %d\n", ret);
+ mlog(0, "dlmunlock returned %d\n", ret);
}
if (status < 0) {
- dlmprintk0("failed to send recovery message. "
+ mlog(0, "failed to send recovery message. "
"must retry with new node map.\n");
goto retry;
}
@@ -1701,9 +1702,9 @@
int nodenum;
int status;
- dlmprintk0("\n");
+ mlog_entry("%u\n", dead_node);
- dlmprintk("dead node is %u\n", dead_node);
+ mlog(0, "dead node is %u\n", dead_node);
spin_lock(&dlm->spinlock);
dlm_node_iter_init(dlm->domain_map, &iter);
@@ -1719,17 +1720,17 @@
while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
ret = 0;
if (nodenum == dead_node) {
- dlmprintk("not sending begin reco to dead node "
+ mlog(0, "not sending begin reco to dead node "
"%u\n", dead_node);
continue;
}
if (nodenum == dlm->node_num) {
- dlmprintk0("not sending begin reco to self\n");
+ mlog(0, "not sending begin reco to self\n");
continue;
}
ret = -EINVAL;
- dlmprintk("attempting to send begin reco msg to %d\n",
+ mlog(0, "attempting to send begin reco msg to %d\n",
nodenum);
ret = net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
&br, sizeof(br),
@@ -1737,8 +1738,7 @@
if (ret >= 0)
ret = status;
if (ret < 0) {
- dlmprintk("error occurred in "
- "net_send_message: %d\n", ret);
+ mlog_errno(ret);
break;
}
}
@@ -1757,16 +1757,15 @@
dlm_begin_reco_to_host(br);
- dlmprintk("node %u wants to recover node %u\n",
+ mlog(0, "node %u wants to recover node %u\n",
br->node_idx, br->dead_node);
spin_lock(&dlm->spinlock);
if (dlm->reco.new_master != NM_INVALID_SLOT_NUM) {
- dlmprintk("new_master already set to %u! "
- "that node had better be dead!!!\n",
+ mlog(0, "new_master already set to %u!\n",
dlm->reco.new_master);
}
if (dlm->reco.dead_node != NM_INVALID_SLOT_NUM) {
- dlmprintk("dead_node already set to %u!\n",
+ mlog(0, "dead_node already set to %u!\n",
dlm->reco.dead_node);
}
dlm->reco.new_master = br->node_idx;
@@ -1786,7 +1785,7 @@
int nodenum;
int status;
- dlmprintk("finishing recovery for node %u\n", dlm->reco.dead_node);
+ mlog(0, "finishing recovery for node %u\n", dlm->reco.dead_node);
spin_lock(&dlm->spinlock);
dlm_node_iter_init(dlm->domain_map, &iter);
@@ -1806,8 +1805,7 @@
if (ret >= 0)
ret = status;
if (ret < 0) {
- dlmprintk("error occurred in "
- "net_send_message: %d\n", ret);
+ mlog_errno(ret);
break;
}
}
@@ -1826,8 +1824,8 @@
dlm_finalize_reco_to_host(fr);
- dlmprintk("node %u finalizing recovery of node %u\n",
- fr->node_idx, fr->dead_node);
+ mlog(0, "node %u finalizing recovery of node %u\n",
+ fr->node_idx, fr->dead_node);
spin_lock(&dlm->spinlock);
Modified: trunk/fs/ocfs2/dlm/dlmthread.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmthread.c 2005-04-22 22:04:18 UTC (rev 2167)
+++ trunk/fs/ocfs2/dlm/dlmthread.c 2005-04-23 01:52:24 UTC (rev 2168)
@@ -49,6 +49,9 @@
#include "dlmapi.h"
#include "dlmcommon.h"
+#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_THREAD)
+#include "cluster/masklog.h"
+
extern spinlock_t dlm_domain_lock;
extern struct list_head dlm_domains;
@@ -114,21 +117,21 @@
* unused list or not and does the appropriate thing. */
static void __dlm_lockres_calc_usage(dlm_ctxt *dlm, dlm_lock_resource *res)
{
- dlmprintk0("\n");
+ mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
assert_spin_locked(&dlm->spinlock);
assert_spin_locked(&res->spinlock);
if (__dlm_lockres_unused(res) && list_empty(&res->purge)) {
- dlmprintk("putting lockres %.*s from purge list\n",
- res->lockname.len, res->lockname.name);
+ mlog(0, "putting lockres %.*s from purge list\n",
+ res->lockname.len, res->lockname.name);
res->last_used = jiffies;
list_add_tail(&res->purge, &dlm->purge_list);
dlm->purge_count++;
} else if (!list_empty(&res->purge)) {
- dlmprintk("removing lockres %.*s from purge list\n",
- res->lockname.len, res->lockname.name);
+ mlog(0, "removing lockres %.*s from purge list\n",
+ res->lockname.len, res->lockname.name);
list_del_init(&res->purge);
dlm->purge_count--;
@@ -138,7 +141,7 @@
void dlm_lockres_calc_usage(dlm_ctxt *dlm,
dlm_lock_resource *res)
{
- dlmprintk0("\n");
+ mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
spin_lock(&dlm->spinlock);
spin_lock(&res->spinlock);
@@ -155,13 +158,12 @@
int master;
int ret;
- dlmprintk0("\n");
spin_lock(&lockres->spinlock);
master = lockres->owner == dlm->node_num;
spin_unlock(&lockres->spinlock);
- dlmprintk("purging lockres %.*s, master = %d\n", lockres->lockname.len,
- lockres->lockname.name, master);
+ mlog(0, "purging lockres %.*s, master = %d\n", lockres->lockname.len,
+ lockres->lockname.name, master);
/* Non master is the easy case -- no migration required, just
* quit. */
@@ -174,11 +176,13 @@
ret = dlm_migrate_lockres(dlm, lockres, NM_MAX_NODES);
if (ret == -ENOTEMPTY) {
- dlmprintk0("lockres still has local locks! for "
- "now, this will BUG.\n");
+ mlog(ML_ERROR, "lockres %.*s still has local locks!\n",
+ lockres->lockname.len, lockres->lockname.name);
+
BUG();
} else if (ret < 0) {
- dlmprintk0("migrate failed, trying it again\n");
+ mlog(ML_NOTICE, "lockres %.*s: migrate failed, retrying\n",
+ lockres->lockname.len, lockres->lockname.name);
goto again;
}
@@ -235,9 +239,9 @@
/* This may drop and reacquire the dlm spinlock if it
* has to do migration. */
- dlmprintk0("calling dlm_purge_lockres!\n");
+ mlog(0, "calling dlm_purge_lockres!\n");
dlm_purge_lockres(dlm, lockres);
- dlmprintk0("DONE calling dlm_purge_lockres!\n");
+ mlog(0, "DONE calling dlm_purge_lockres!\n");
}
spin_unlock(&dlm->spinlock);
@@ -252,9 +256,9 @@
DLM_ASSERT(res);
- // dlmprintk("res->lockname.len=%d\n", res->lockname.len);
- // dlmprintk("res->lockname.name=%p\n", res->lockname.name);
- // dlmprintk("shuffle res %.*s\n", res->lockname.len,
+ //mlog(0, "res->lockname.len=%d\n", res->lockname.len);
+ //mlog(0, "res->lockname.name=%p\n", res->lockname.name);
+ //mlog(0, "shuffle res %.*s\n", res->lockname.len,
// res->lockname.name);
/* because this function is called with the lockres
@@ -269,13 +273,13 @@
converting:
if (list_empty(&res->converting))
goto blocked;
- dlmprintk("res %.*s has locks on a convert queue\n", res->lockname.len,
- res->lockname.name);
+ mlog(0, "res %.*s has locks on a convert queue\n", res->lockname.len,
+ res->lockname.name);
target = list_entry(res->converting.next, dlm_lock, list);
if (target->ml.convert_type == LKM_IVMODE) {
- dlmprintk0("eeek!!! converting a lock with no "
- "convert_type!!!!\n");
+ mlog(ML_ERROR, "%.*s: converting a lock with no "
+ "convert_type!\n", res->lockname.len, res->lockname.name);
BUG();
}
head = &res->granted;
@@ -320,10 +324,10 @@
spin_lock(&target->spinlock);
DLM_ASSERT(target->ml.highest_blocked == LKM_IVMODE);
- dlmprintk("calling ast for converting lock: %.*s, have: %d, "
- "granting: %d, node: %u\n", res->lockname.len,
- res->lockname.name, target->ml.type,
- target->ml.convert_type, target->ml.node);
+ mlog(0, "calling ast for converting lock: %.*s, have: %d, "
+ "granting: %d, node: %u\n", res->lockname.len,
+ res->lockname.name, target->ml.type,
+ target->ml.convert_type, target->ml.node);
target->ml.type = target->ml.convert_type;
target->ml.convert_type = LKM_IVMODE;
@@ -384,9 +388,9 @@
spin_lock(&target->spinlock);
DLM_ASSERT(target->ml.highest_blocked == LKM_IVMODE);
- dlmprintk("calling ast for blocked lock: %.*s, granting: %d, "
- "node: %u\n", res->lockname.len, res->lockname.name,
- target->ml.type, target->ml.node);
+ mlog(0, "calling ast for blocked lock: %.*s, granting: %d, "
+ "node: %u\n", res->lockname.len, res->lockname.name,
+ target->ml.type, target->ml.node);
// target->ml.type is already correct
list_del_init(&target->list);
@@ -410,7 +414,7 @@
/* must have NO locks when calling this */
void dlm_kick_thread(dlm_ctxt *dlm, dlm_lock_resource *res)
{
- dlmprintk("dlm=%p, res=%p\n", dlm, res);
+ mlog_entry("dlm=%p, res=%p\n", dlm, res);
if (res) {
spin_lock(&dlm->spinlock);
spin_lock(&res->spinlock);
@@ -432,13 +436,12 @@
/* Launch the NM thread for the mounted volume */
int dlm_launch_thread(dlm_ctxt *dlm)
{
- dlmprintk0("starting dlm thread...\n");
+ mlog(0, "starting dlm thread...\n");
dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm_thread");
if (IS_ERR(dlm->dlm_thread_task)) {
+ mlog_errno(PTR_ERR(dlm->dlm_thread_task));
dlm->dlm_thread_task = NULL;
- dlmprintk("unable to launch dlm thread, error=%ld",
- PTR_ERR(dlm->dlm_thread_task));
return -EINVAL;
}
@@ -448,7 +451,7 @@
void dlm_complete_thread(dlm_ctxt *dlm)
{
if (dlm->dlm_thread_task) {
- dlmprintk0("waiting for dlm thread to exit\n");
+ mlog(ML_KTHREAD, "waiting for dlm thread to exit\n");
kthread_stop(dlm->dlm_thread_task);
dlm->dlm_thread_task = NULL;
}
@@ -465,7 +468,6 @@
return empty;
}
-
int dlm_flush_lockres_asts(dlm_ctxt *dlm, dlm_lock_resource *res)
{
dlm_flush_asts(dlm);
@@ -473,10 +475,9 @@
return 0;
}
-
-
void dlm_flush_asts(dlm_ctxt *dlm)
{
+ int ret;
dlm_lock *lock;
dlm_lock_resource *res;
u8 hi;
@@ -488,7 +489,7 @@
/* get an extra ref on lock */
dlm_lock_get(lock);
res = lock->lockres;
- dlmprintk0("delivering an ast for this lockres\n");
+ mlog(0, "delivering an ast for this lockres\n");
DLM_ASSERT(lock->ast_pending);
@@ -498,8 +499,9 @@
spin_unlock(&dlm->ast_lock);
if (lock->ml.node != dlm->node_num) {
- if (dlm_do_remote_ast(dlm, res, lock) < 0)
- dlmprintk("eek\n");
+ ret = dlm_do_remote_ast(dlm, res, lock);
+ if (ret < 0)
+ mlog_errno(ret);
} else
dlm_do_local_ast(dlm, res, lock);
@@ -508,9 +510,9 @@
/* possible that another ast was queued while
* we were delivering the last one */
if (!list_empty(&lock->ast_list)) {
- dlmprintk0("aha another ast got queued while "
- "we were finishing the last one. will "
- "keep the ast_pending flag set.\n");
+ mlog(0, "aha another ast got queued while "
+ "we were finishing the last one. will "
+ "keep the ast_pending flag set.\n");
} else
lock->ast_pending = 0;
@@ -541,12 +543,13 @@
dlm_lock_put(lock);
spin_unlock(&dlm->ast_lock);
- dlmprintk("delivering a bast for this lockres "
- "(blocked = %d\n", hi);
+ mlog(0, "delivering a bast for this lockres "
+ "(blocked = %d\n", hi);
if (lock->ml.node != dlm->node_num) {
- if (dlm_send_proxy_bast(dlm, res, lock, hi) < 0)
- dlmprintk0("eeek\n");
+ ret = dlm_send_proxy_bast(dlm, res, lock, hi);
+ if (ret < 0)
+ mlog_errno(ret);
} else
dlm_do_local_bast(dlm, res, lock, hi);
@@ -555,9 +558,9 @@
/* possible that another bast was queued while
* we were delivering the last one */
if (!list_empty(&lock->bast_list)) {
- dlmprintk0("aha another bast got queued while "
- "we were finishing the last one. will "
- "keep the bast_pending flag set.\n");
+ mlog(0, "aha another bast got queued while "
+ "we were finishing the last one. will "
+ "keep the bast_pending flag set.\n");
} else
lock->bast_pending = 0;
@@ -580,7 +583,7 @@
dlm_ctxt *dlm = data;
unsigned long timeout = msecs_to_jiffies(DLM_THREAD_TIMEOUT_MS);
- dlmprintk("dlm thread running for %s...\n", dlm->name);
+ mlog(0, "dlm thread running for %s...\n", dlm->name);
while (!kthread_should_stop()) {
int n = DLM_THREAD_MAX_DIRTY;
@@ -617,10 +620,9 @@
if (res->state & DLM_LOCK_RES_IN_PROGRESS) {
/* move it to the tail and keep going */
spin_unlock(&res->spinlock);
- dlmprintk("delaying list shuffling for in-"
- "progress lockres %.*s\n",
- res->lockname.len,
- res->lockname.name);
+ mlog(0, "delaying list shuffling for in-"
+ "progress lockres %.*s\n",
+ res->lockname.len, res->lockname.name);
delay = 1;
goto in_progress;
}
@@ -630,8 +632,8 @@
* spinlock and do NOT have the dlm lock.
* safe to reserve/queue asts and run the lists. */
- dlmprintk("calling dlm_shuffle_lists with "
- "dlm=%p, res=%p\n", dlm, res);
+ mlog(0, "calling dlm_shuffle_lists with dlm=%p, "
+ "res=%p\n", dlm, res);
/* called while holding lockres lock */
dlm_shuffle_lists(dlm, res);
@@ -655,7 +657,7 @@
/* unlikely, but we may need to give time to
* other tasks */
if (!--n) {
- dlmprintk0("throttling dlm_thread\n");
+ mlog(0, "throttling dlm_thread\n");
break;
}
}
@@ -674,6 +676,6 @@
timeout);
}
- dlmprintk0("quitting DLM thread\n");
+ mlog(0, "quitting DLM thread\n");
return 0;
}
Modified: trunk/fs/ocfs2/dlm/dlmunlock.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmunlock.c 2005-04-22 22:04:18 UTC (rev 2167)
+++ trunk/fs/ocfs2/dlm/dlmunlock.c 2005-04-23 01:52:24 UTC (rev 2168)
@@ -47,6 +47,8 @@
#include "dlmapi.h"
#include "dlmcommon.h"
+#define MLOG_MASK_PREFIX ML_DLM
+#include "cluster/masklog.h"
#define DLM_UNLOCK_FREE_LOCK 0x00000001
#define DLM_UNLOCK_CALL_AST 0x00000002
@@ -87,8 +89,8 @@
int in_use;
u8 owner;
- dlmprintk("master_node = %d, valblk = %d\n", master_node,
- flags & LKM_VALBLK);
+ mlog(0, "master_node = %d, valblk = %d\n", master_node,
+ flags & LKM_VALBLK);
if (master_node)
DLM_ASSERT(res->owner == dlm->node_num);
@@ -101,16 +103,16 @@
in_use = !list_empty(&lock->ast_list);
spin_unlock(&dlm->spinlock);
if (in_use) {
- dlmprintk("lockres %.*s: Someone is calling dlmunlock while "
- "waiting for an ast!", res->lockname.len,
- res->lockname.name);
+ mlog(ML_ERROR, "lockres %.*s: Someone is calling dlmunlock "
+ "while waiting for an ast!", res->lockname.len,
+ res->lockname.name);
return DLM_BADPARAM;
}
spin_lock(&res->spinlock);
if (res->state & DLM_LOCK_RES_IN_PROGRESS) {
if (master_node) {
- dlmprintk0("lockres in progress!\n");
+ mlog(ML_ERROR, "lockres in progress!\n");
spin_unlock(&res->spinlock);
return DLM_FORWARD;
}
@@ -189,9 +191,8 @@
if (actions & DLM_UNLOCK_FREE_LOCK) {
/* this should always be coupled with list removal */
DLM_ASSERT(actions & DLM_UNLOCK_REMOVE_LOCK);
- dlmprintk("lock %llu should be gone now! refs=%d\n",
- lock->ml.cookie,
- atomic_read(&lock->lock_refs.refcount));
+ mlog(0, "lock %llu should be gone now! refs=%d\n",
+ lock->ml.cookie, atomic_read(&lock->lock_refs.refcount));
}
if (actions & DLM_UNLOCK_CALL_AST)
*call_ast = 1;
@@ -243,7 +244,7 @@
struct iovec iov[2];
size_t iovlen = 1;
- dlmprintk0("\n");
+ mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
memset(&unlock, 0, sizeof(unlock));
unlock.node_idx = dlm->node_num;
@@ -270,14 +271,13 @@
if (status == DLM_CANCELGRANT)
ret = DLM_NORMAL;
else if (status == DLM_FORWARD) {
- dlmprintk0("master was in-progress. retry\n");
+ mlog(0, "master was in-progress. retry\n");
ret = DLM_FORWARD;
} else
ret = status;
lksb->status = status;
} else {
- dlmprintk("error occurred in net_send_message: %d\n",
- tmpret);
+ mlog_errno(tmpret);
ret = dlm_err_to_dlm_status(tmpret);
lksb->status = ret;
}
@@ -311,18 +311,18 @@
flags = unlock->flags;
if (flags & LKM_GET_LVB) {
- dlmprintk0("bad args! GET_LVB specified on unlock!\n");
+ mlog(ML_ERROR, "bad args! GET_LVB specified on unlock!\n");
return DLM_BADARGS;
}
if ((flags & (LKM_PUT_LVB|LKM_CANCEL)) == (LKM_PUT_LVB|LKM_CANCEL)) {
- dlmprintk0("bad args! cannot modify lvb on a CANCEL "
- "request!\n");
+ mlog(ML_ERROR, "bad args! cannot modify lvb on a CANCEL "
+ "request!\n");
return DLM_BADARGS;
}
if (unlock->namelen > DLM_LOCKID_NAME_MAX) {
- printk("Invalid name length in unlock handler!\n");
+ mlog(ML_ERROR, "Invalid name length in unlock handler!\n");
return DLM_IVBUFLEN;
}
@@ -331,14 +331,14 @@
DLM_ASSERT(dlm_domain_fully_joined(dlm));
- dlmprintk("lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : "none");
+ mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : "none");
res = dlm_lookup_lockres(dlm, unlock->name, unlock->namelen);
if (!res) {
/* We assume here that a no lock resource simply means
* it was migrated away and destroyed before the other
* node could detect it. */
- dlmprintk0("returning DLM_FORWARD -- res no longer exists\n");
+ mlog(0, "returning DLM_FORWARD -- res no longer exists\n");
status = DLM_FORWARD;
goto not_found;
}
@@ -348,21 +348,21 @@
spin_lock(&res->spinlock);
if (res->state & DLM_LOCK_RES_RECOVERING) {
spin_unlock(&res->spinlock);
- dlmprintk0("returning DLM_RECOVERING\n");
+ mlog(0, "returning DLM_RECOVERING\n");
status = DLM_RECOVERING;
goto leave;
}
if (res->state & DLM_LOCK_RES_MIGRATING) {
spin_unlock(&res->spinlock);
- dlmprintk0("returning DLM_MIGRATING\n");
+ mlog(0, "returning DLM_MIGRATING\n");
status = DLM_MIGRATING;
goto leave;
}
if (res->owner != dlm->node_num) {
spin_unlock(&res->spinlock);
- dlmprintk0("returning DLM_FORWARD -- not master\n");
+ mlog(0, "returning DLM_FORWARD -- not master\n");
status = DLM_FORWARD;
goto leave;
}
@@ -400,7 +400,7 @@
* all the way back out */
status = dlmunlock_master(dlm, res, lock, lksb, flags, &ignore);
if (status == DLM_FORWARD)
- dlmprintk0("lockres is in progress\n");
+ mlog(0, "lockres is in progress\n");
if (flags & LKM_PUT_LVB)
lksb->flags &= ~DLM_LKSB_PUT_LVB;
@@ -409,8 +409,8 @@
not_found:
if (!found)
- dlmprintk("failed to find lock to unlock! cookie=%llu\n",
- unlock->cookie);
+ mlog(ML_ERROR, "failed to find lock to unlock! cookie=%llu\n",
+ unlock->cookie);
else {
/* send the lksb->status back to the other node */
status = lksb->status;
@@ -453,8 +453,7 @@
status = DLM_NORMAL;
*actions = DLM_UNLOCK_CALL_AST;
} else {
- /* err. um. eek! */
- dlmprintk0("lock to cancel is not on any list! bug!\n");
+ mlog(ML_ERROR, "lock to cancel is not on any list!\n");
lksb->status = DLM_IVLOCKID;
status = DLM_IVLOCKID;
*actions = 0;
@@ -496,7 +495,7 @@
dlm_lock *lock = NULL;
int call_ast, is_master;
- dlmprintk0("\n");
+ mlog_entry_void();
if (!lksb)
return DLM_BADARGS;
@@ -505,7 +504,7 @@
return DLM_BADPARAM;
if ((flags & (LKM_VALBLK | LKM_CANCEL)) == (LKM_VALBLK | LKM_CANCEL)) {
- dlmprintk0("VALBLK given with CANCEL: ignoring VALBLK\n");
+ mlog(0, "VALBLK given with CANCEL: ignoring VALBLK\n");
flags &= ~LKM_VALBLK;
}
@@ -522,7 +521,7 @@
retry:
call_ast = 0;
/* need to retry up here because owner may have changed */
- dlmprintk("lock=%p res=%p\n", lock, res);
+ mlog(0, "lock=%p res=%p\n", lock, res);
spin_lock(&res->spinlock);
is_master = (res->owner == dlm->node_num);
@@ -531,13 +530,13 @@
if (is_master) {
status = dlmunlock_master(dlm, res, lock, lksb, flags,
&call_ast);
- dlmprintk("done calling dlmunlock_master: returned %d, "
- "call_ast is %d\n", status, call_ast);
+ mlog(0, "done calling dlmunlock_master: returned %d, "
+ "call_ast is %d\n", status, call_ast);
} else {
status = dlmunlock_remote(dlm, res, lock, lksb, flags,
&call_ast);
- dlmprintk("done calling dlmunlock_remote: returned %d, "
- "call_ast is %d\n", status, call_ast);
+ mlog(0, "done calling dlmunlock_remote: returned %d, "
+ "call_ast is %d\n", status, call_ast);
}
if (status == DLM_RECOVERING ||
@@ -553,19 +552,18 @@
/* do we want to yield(); ?? */
msleep(50);
- dlmprintk0("retrying unlock due to pending recovery/"
- "migration/in-progress\n");
+ mlog(0, "retrying unlock due to pending recovery/"
+ "migration/in-progress\n");
goto retry;
}
if (call_ast) {
- dlmprintk("calling unlockast(%p, %d)\n",
- data, lksb->status);
+ mlog(0, "calling unlockast(%p, %d)\n", data, lksb->status);
(*unlockast)(data, lksb->status);
}
if (status == DLM_NORMAL) {
- dlmprintk("kicking the thread\n");
+ mlog(0, "kicking the thread\n");
dlm_kick_thread(dlm, res);
}
@@ -573,7 +571,7 @@
dlm_lockres_put(res);
dlm_lock_put(lock);
- dlmprintk("returning status=%d!\n", status);
+ mlog(0, "returning status=%d!\n", status);
return status;
}
EXPORT_SYMBOL(dlmunlock);
More information about the Ocfs2-commits
mailing list