[Ocfs2-commits] khackel commits r2448 - in trunk/fs/ocfs2: . dlm

svn-commits at oss.oracle.com
Wed Jun 29 19:30:34 CDT 2005


Author: khackel
Date: 2005-06-29 19:30:29 -0500 (Wed, 29 Jun 2005)
New Revision: 2448

Modified:
   trunk/fs/ocfs2/dlm/dlmapi.h
   trunk/fs/ocfs2/dlm/dlmast.c
   trunk/fs/ocfs2/dlm/dlmcommon.h
   trunk/fs/ocfs2/dlm/dlmconvert.c
   trunk/fs/ocfs2/dlm/dlmconvert.h
   trunk/fs/ocfs2/dlm/dlmdebug.c
   trunk/fs/ocfs2/dlm/dlmdebug.h
   trunk/fs/ocfs2/dlm/dlmdomain.c
   trunk/fs/ocfs2/dlm/dlmdomain.h
   trunk/fs/ocfs2/dlm/dlmfs.c
   trunk/fs/ocfs2/dlm/dlmlock.c
   trunk/fs/ocfs2/dlm/dlmmaster.c
   trunk/fs/ocfs2/dlm/dlmrecovery.c
   trunk/fs/ocfs2/dlm/dlmthread.c
   trunk/fs/ocfs2/dlm/dlmunlock.c
   trunk/fs/ocfs2/dlm/userdlm.c
   trunk/fs/ocfs2/dlm/userdlm.h
   trunk/fs/ocfs2/dlmglue.c
   trunk/fs/ocfs2/ocfs2.h
Log:
* removes all typedef'd structs and enums from the dlm
* cleans up trailing whitespace
* fixes a few cases of lines > 80 columns

Signed-off-by: manish
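
The conversion is mechanical; a minimal before/after sketch of the
pattern (the names here are illustrative, not from the dlm headers):

/* before: leading-underscore tag plus a typedef alias */
struct _foo_ctxt;
typedef struct _foo_ctxt foo_ctxt;

void foo_put(foo_ctxt *ctxt);

/* after: forward-declare the real tag and spell out "struct"
 * at every use site */
struct foo_ctxt;

void foo_put(struct foo_ctxt *ctxt);

Function-pointer typedefs such as dlm_astlockfunc_t are deliberately
left alone; kernel style only frowns on typedef'd structs and enums.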



Modified: trunk/fs/ocfs2/dlm/dlmapi.h
===================================================================
--- trunk/fs/ocfs2/dlm/dlmapi.h	2005-06-29 22:47:38 UTC (rev 2447)
+++ trunk/fs/ocfs2/dlm/dlmapi.h	2005-06-30 00:30:29 UTC (rev 2448)
@@ -6,7 +6,7 @@
  * externally exported dlm interfaces
  *
  * Copyright (C) 2004 Oracle.  All rights reserved.
- * 
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
  * License as published by the Free Software Foundation; either
@@ -27,14 +27,11 @@
 #ifndef DLMAPI_H
 #define DLMAPI_H
 
-struct _dlm_lock;
-typedef struct _dlm_lock dlm_lock;
+struct dlm_lock;
+struct dlm_ctxt;
 
-struct _dlm_ctxt;
-typedef struct _dlm_ctxt dlm_ctxt;
-
 /* NOTE: changes made to this enum should be reflected in dlmdebug.c */
-typedef enum _dlm_status {
+enum dlm_status {
 	DLM_NORMAL = 0,           /*  0: request in progress */
 	DLM_GRANTED,              /*  1: request granted */
 	DLM_DENIED,               /*  2: request denied */
@@ -74,22 +71,23 @@
 	DLM_BAD_DEVICE_PATH,      /* 36: Locks device does not exist or path wrong */
 	DLM_NO_DEVICE_PERMISSION, /* 37: Client has insufficient pers for device */
 	DLM_NO_CONTROL_DEVICE,    /* 38: Cannot set options on opened device */
-	
-	DLM_RECOVERING,           /* 39: our lame addition to allow caller to fail a lock 
+
+	DLM_RECOVERING,           /* 39: extension, allows caller to fail a lock
 				     request if it is being recovered */
-	DLM_MIGRATING,            /* 40: our lame addition to allow caller to fail a lock 
+	DLM_MIGRATING,            /* 40: extension, allows caller to fail a lock
 				     request if it is being migrated */
 	DLM_MAXSTATS,             /* 41: upper limit for return code validation */
-} dlm_status;
+};
 
 /* for pretty-printing dlm_status error messages */
-const char *dlm_errmsg(dlm_status err);
+const char *dlm_errmsg(enum dlm_status err);
 /* for pretty-printing dlm_status error names */
-const char *dlm_errname(dlm_status err);
+const char *dlm_errname(enum dlm_status err);
 
 
 
-#define DLM_LKSB_KERNEL_ALLOCATED  0x01  // allocated on master node on behalf of remote node
+#define DLM_LKSB_KERNEL_ALLOCATED  0x01  /* allocated on master node on
+					    behalf of remote node */
 #define DLM_LKSB_PUT_LVB           0x02
 #define DLM_LKSB_GET_LVB           0x04
 #define DLM_LKSB_UNUSED2           0x08
@@ -102,34 +100,32 @@
 
 /* Callers are only allowed access to the lvb and status members of
  * this struct. */
-struct _dlm_lockstatus {
-	dlm_status status;
-	u32 flags;           
-	dlm_lock *lockid;
+struct dlm_lockstatus {
+	enum dlm_status status;
+	u32 flags;
+	struct dlm_lock *lockid;
 	char lvb[DLM_LVB_LEN];
 };
 
-typedef struct _dlm_lockstatus dlm_lockstatus;
-
 /* Valid lock modes. */
 #define LKM_IVMODE      (-1)            /* invalid mode */
 #define LKM_NLMODE      0               /* null lock */
-#define LKM_CRMODE      1               /* concurrent read */    /* unsupported */
-#define LKM_CWMODE      2               /* concurrent write */    /* unsupported */
+#define LKM_CRMODE      1               /* concurrent read    unsupported */
+#define LKM_CWMODE      2               /* concurrent write   unsupported */
 #define LKM_PRMODE      3               /* protected read */
-#define LKM_PWMODE      4               /* protected write */    /* unsupported */
+#define LKM_PWMODE      4               /* protected write    unsupported */
 #define LKM_EXMODE      5               /* exclusive */
 #define LKM_MAXMODE     5
 #define LKM_MODEMASK    0xff
 
 /* Flags passed to dlmlock and dlmunlock:
  * reserved: flags used by the "real" dlm
- * only a few are supported by this dlm 
+ * only a few are supported by this dlm
  * (U) = unsupported by ocfs2 dlm */
 #define LKM_ORPHAN       0x00000010  /* this lock is orphanable (U) */
 #define LKM_PARENTABLE   0x00000020  /* this lock was orphaned (U) */
 #define LKM_BLOCK        0x00000040  /* blocking lock request (U) */
-#define LKM_LOCAL        0x00000080  /* local lock request */    
+#define LKM_LOCAL        0x00000080  /* local lock request */
 #define LKM_VALBLK       0x00000100  /* lock value block request */
 #define LKM_NOQUEUE      0x00000200  /* non blocking request */
 #define LKM_CONVERT      0x00000400  /* conversion request */
@@ -146,7 +142,7 @@
 #define LKM_XID          0x00200000  /* use transaction id for deadlock (U) */
 #define LKM_XID_CONFLICT 0x00400000  /* do not allow lock inheritance (U) */
 #define LKM_FORCE        0x00800000  /* force unlock flag */
-#define LKM_REVVALBLK    0x01000000  /* temporary solution: re-validate 
+#define LKM_REVVALBLK    0x01000000  /* temporary solution: re-validate
 					lock value block (U) */
 /* unused */
 #define LKM_UNUSED1      0x00000001  /* unused */
@@ -163,7 +159,7 @@
 					to another node */
 #define LKM_PUT_LVB      0x20000000  /* extension: lvb is being passed
 					should be applied to lockres */
-#define LKM_GET_LVB      0x40000000  /* extension: lvb should be copied 
+#define LKM_GET_LVB      0x40000000  /* extension: lvb should be copied
 					from lockres when lock is granted */
 #define LKM_RECOVERY     0x80000000  /* extension: flag for recovery lock
 					used to avoid recovery rwsem */
@@ -171,29 +167,28 @@
 
 typedef void (dlm_astlockfunc_t)(void *);
 typedef void (dlm_bastlockfunc_t)(void *, int);
-typedef void (dlm_astunlockfunc_t)(void *, dlm_status);
+typedef void (dlm_astunlockfunc_t)(void *, enum dlm_status);
 
-dlm_status dlmlock(dlm_ctxt *dlm,
-		   int mode,
-		   dlm_lockstatus *lksb,
-		   int flags,
-		   const char *name,
-		   dlm_astlockfunc_t *ast,
-		   void *data,
-		   dlm_bastlockfunc_t *bast);
+enum dlm_status dlmlock(struct dlm_ctxt *dlm,
+			int mode,
+			struct dlm_lockstatus *lksb,
+			int flags,
+			const char *name,
+			dlm_astlockfunc_t *ast,
+			void *data,
+			dlm_bastlockfunc_t *bast);
 
-dlm_status dlmunlock(dlm_ctxt *dlm,
-		     dlm_lockstatus *lksb,
-		     int flags,
-		     dlm_astunlockfunc_t *unlockast,
-		     void *data);
+enum dlm_status dlmunlock(struct dlm_ctxt *dlm,
+			  struct dlm_lockstatus *lksb,
+			  int flags,
+			  dlm_astunlockfunc_t *unlockast,
+			  void *data);
 
-dlm_ctxt * dlm_register_domain(const char *domain,
-			       u32 key);
+struct dlm_ctxt * dlm_register_domain(const char *domain, u32 key);
 
-void dlm_unregister_domain(dlm_ctxt *dlm);
+void dlm_unregister_domain(struct dlm_ctxt *dlm);
 
-void dlm_print_one_lock(dlm_lock *lockid);
+void dlm_print_one_lock(struct dlm_lock *lockid);
 
 typedef void (dlm_eviction_func)(int, void *);
 struct dlm_eviction_cb {
@@ -204,7 +199,7 @@
 void dlm_setup_eviction_cb(struct dlm_eviction_cb *cb,
 			   dlm_eviction_func *f,
 			   void *data);
-void dlm_register_eviction_cb(dlm_ctxt *dlm,
+void dlm_register_eviction_cb(struct dlm_ctxt *dlm,
 			      struct dlm_eviction_cb *cb);
 void dlm_unregister_eviction_cb(struct dlm_eviction_cb *cb);
 

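For callers, the visible change in dlmapi.h is purely syntactic: the
same entry points now take struct/enum-qualified types.  A hypothetical
caller, sketched from nothing but the declarations above (the domain
name, key, and lock name are made up; error handling is trimmed):

#include <linux/kernel.h>
#include "dlmapi.h"

static void my_ast(void *astdata)
{
	/* granted: astdata is whatever was passed to dlmlock() */
}

static void my_bast(void *astdata, int blocked_type)
{
	/* another node wants this lock at blocked_type */
}

static void example(void)
{
	struct dlm_ctxt *dlm;
	struct dlm_lockstatus lksb = { 0 };
	enum dlm_status status;

	dlm = dlm_register_domain("exampledomain", 0xabcd1234);
	if (!dlm)	/* assuming a NULL return on failure */
		return;

	status = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_VALBLK,
			 "examplelock", my_ast, &lksb, my_bast);
	if (status != DLM_NORMAL)
		printk("dlmlock: %s (%s)\n",
		       dlm_errname(status), dlm_errmsg(status));

	/* the grant arrives asynchronously via my_ast(); unregister
	 * only after all locks have been dropped */
	dlm_unregister_domain(dlm);
}
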
Modified: trunk/fs/ocfs2/dlm/dlmast.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmast.c	2005-06-29 22:47:38 UTC (rev 2447)
+++ trunk/fs/ocfs2/dlm/dlmast.c	2005-06-30 00:30:29 UTC (rev 2448)
@@ -50,20 +50,20 @@
 #define MLOG_MASK_PREFIX ML_DLM
 #include "cluster/masklog.h"
 
-static void dlm_update_lvb(dlm_ctxt *dlm, dlm_lock_resource *res,
-			   dlm_lock *lock);
-static int dlm_should_cancel_bast(dlm_ctxt *dlm, dlm_lock *lock);
+static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+			   struct dlm_lock *lock);
+static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
 
 /* Should be called as an ast gets queued to see if the new
  * lock level will obsolete a pending bast.
- * For example, if dlm_thread queued a bast for an EX lock that 
- * was blocking another EX, but before sending the bast the 
- * lock owner downconverted to NL, the bast is now obsolete. 
- * Only the ast should be sent. 
+ * For example, if dlm_thread queued a bast for an EX lock that
+ * was blocking another EX, but before sending the bast the
+ * lock owner downconverted to NL, the bast is now obsolete.
+ * Only the ast should be sent.
  * This is needed because the lock and convert paths can queue
- * asts out-of-band (not waiting for dlm_thread) in order to 
+ * asts out-of-band (not waiting for dlm_thread) in order to
  * allow for LKM_NOQUEUE to get immediate responses. */
-static int dlm_should_cancel_bast(dlm_ctxt *dlm, dlm_lock *lock)
+static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 {
 	assert_spin_locked(&dlm->ast_lock);
 	assert_spin_locked(&lock->spinlock);
@@ -72,11 +72,11 @@
 		return 0;
 	BUG_ON(lock->ml.highest_blocked == LKM_NLMODE);
 
-	if (lock->bast_pending && 
+	if (lock->bast_pending &&
 	    list_empty(&lock->bast_list))
 		/* old bast already sent, ok */
 		return 0;
-	
+
 	if (lock->ml.type == LKM_EXMODE)
 		/* EX blocks anything left, any bast still valid */
 		return 0;
@@ -90,7 +90,7 @@
 	return 0;
 }
 
-void __dlm_queue_ast(dlm_ctxt *dlm, dlm_lock *lock)
+void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 {
 	mlog_entry_void();
 
@@ -117,7 +117,7 @@
 	spin_unlock(&lock->spinlock);
 }
 
-void dlm_queue_ast(dlm_ctxt *dlm, dlm_lock *lock)
+void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 {
 	mlog_entry_void();
 
@@ -130,7 +130,7 @@
 }
 
 
-void __dlm_queue_bast(dlm_ctxt *dlm, dlm_lock *lock)
+void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 {
 	mlog_entry_void();
 
@@ -150,7 +150,7 @@
 	spin_unlock(&lock->spinlock);
 }
 
-void dlm_queue_bast(dlm_ctxt *dlm, dlm_lock *lock)
+void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 {
 	mlog_entry_void();
 
@@ -162,10 +162,10 @@
 	spin_unlock(&dlm->ast_lock);
 }
 
-static void dlm_update_lvb(dlm_ctxt *dlm, dlm_lock_resource *res,
-			   dlm_lock *lock)
+static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+			   struct dlm_lock *lock)
 {
-	dlm_lockstatus *lksb = lock->lksb;
+	struct dlm_lockstatus *lksb = lock->lksb;
 	BUG_ON(!lksb);
 
 	/* only updates if this node masters the lockres */
@@ -191,10 +191,11 @@
 	lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB);
 }
 
-void dlm_do_local_ast(dlm_ctxt *dlm, dlm_lock_resource *res, dlm_lock *lock)
+void dlm_do_local_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+		      struct dlm_lock *lock)
 {
 	dlm_astlockfunc_t *fn;
-	dlm_lockstatus *lksb;
+	struct dlm_lockstatus *lksb;
 
 	mlog_entry_void();
 
@@ -207,10 +208,11 @@
 }
 
 
-int dlm_do_remote_ast(dlm_ctxt *dlm, dlm_lock_resource *res, dlm_lock *lock)
+int dlm_do_remote_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+		      struct dlm_lock *lock)
 {
 	int ret;
-	dlm_lockstatus *lksb;
+	struct dlm_lockstatus *lksb;
 	int lksbflags;
 
 	mlog_entry_void();
@@ -220,22 +222,22 @@
 
 	lksbflags = lksb->flags;
 	dlm_update_lvb(dlm, res, lock);
-		
+
 	/* lock request came from another node
 	 * go do the ast over there */
 	ret = dlm_send_proxy_ast(dlm, res, lock, lksbflags);
 	return ret;
 }
 
-void dlm_do_local_bast(dlm_ctxt *dlm, dlm_lock_resource *res, 
-		       dlm_lock *lock, int blocked_type)
+void dlm_do_local_bast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+		       struct dlm_lock *lock, int blocked_type)
 {
 	dlm_bastlockfunc_t *fn = lock->bast;
 
 	mlog_entry_void();
 
 	BUG_ON(lock->ml.node != dlm->node_num);
-	
+
 	(*fn)(lock->astdata, blocked_type);
 }
 
@@ -245,10 +247,10 @@
 {
 	int ret;
 	unsigned int locklen;
-	dlm_ctxt *dlm = data;
-	dlm_lock_resource *res = NULL;
-	dlm_lock *lock = NULL;
-	dlm_proxy_ast *past = (dlm_proxy_ast *) msg->buf;
+	struct dlm_ctxt *dlm = data;
+	struct dlm_lock_resource *res = NULL;
+	struct dlm_lock *lock = NULL;
+	struct dlm_proxy_ast *past = (struct dlm_proxy_ast *) msg->buf;
 	char *name;
 	struct list_head *iter, *head=NULL;
 	u64 cookie;
@@ -257,7 +259,7 @@
 	if (!dlm_grab(dlm))
 		return DLM_REJECTED;
 
-	mlog_bug_on_msg(!dlm_domain_fully_joined(dlm), 
+	mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
 			"Domain %s not fully joined!\n", dlm->name);
 
 	dlm_proxy_ast_to_host(past);
@@ -279,12 +281,12 @@
 		goto leave;
 	}
 
-	mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : 
+	mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" :
 		  (flags & LKM_GET_LVB ? "get lvb" : "none"));
 
 	mlog(0, "type=%d, blocked_type=%d\n", past->type, past->blocked_type);
 
-	if (past->type != DLM_AST && 
+	if (past->type != DLM_AST &&
 	    past->type != DLM_BAST) {
 		mlog(ML_ERROR, "Unknown ast type! %d, cookie=%"MLFu64", "
 		     "name=%.*s\n", past->type, cookie, locklen, name);
@@ -295,8 +297,8 @@
 	res = dlm_lookup_lockres(dlm, name, locklen);
 	if (!res) {
 		mlog(ML_ERROR, "got %sast for unknown lockres! "
-			       "cookie=%"MLFu64", name=%.*s, namelen=%u\n", 
-		     past->type == DLM_AST ? "" : "b", 
+			       "cookie=%"MLFu64", name=%.*s, namelen=%u\n",
+		     past->type == DLM_AST ? "" : "b",
 		     cookie, locklen, name, locklen);
 		ret = DLM_IVLOCKID;
 		goto leave;
@@ -322,7 +324,7 @@
 	head = &res->converting;
 	lock = NULL;
 	list_for_each(iter, head) {
-		lock = list_entry (iter, dlm_lock, list);
+		lock = list_entry (iter, struct dlm_lock, list);
 		if (lock->ml.cookie == cookie)
 			goto do_ast;
 	}
@@ -330,11 +332,11 @@
 	/* if not on convert, try blocked for ast, granted for bast */
 	if (past->type == DLM_AST)
 		head = &res->blocked;
-	else 
+	else
 		head = &res->granted;
 
 	list_for_each(iter, head) {
-		lock = list_entry (iter, dlm_lock, list);
+		lock = list_entry (iter, struct dlm_lock, list);
 		if (lock->ml.cookie == cookie)
 			goto do_ast;
 	}
@@ -372,7 +374,7 @@
 		}
 	}
 	spin_unlock(&res->spinlock);
-	
+
 	if (past->type == DLM_AST)
 		dlm_do_local_ast(dlm, res, lock);
 	else
@@ -389,21 +391,21 @@
 
 
 
-int dlm_send_proxy_ast_msg(dlm_ctxt *dlm, dlm_lock_resource *res, 
-			   dlm_lock *lock, int msg_type, 
+int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+			   struct dlm_lock *lock, int msg_type,
 			   int blocked_type, int flags)
 {
 	int ret = 0;
-	dlm_proxy_ast past;
+	struct dlm_proxy_ast past;
 	struct iovec iov[2];
 	size_t iovlen = 1;
 	int status;
 
 	mlog_entry("res %.*s, to=%u, type=%d, blocked_type=%d\n",
-		   res->lockname.len, res->lockname.name, lock->ml.node, 
+		   res->lockname.len, res->lockname.name, lock->ml.node,
 		   msg_type, blocked_type);
 
-	memset(&past, 0, sizeof(dlm_proxy_ast));
+	memset(&past, 0, sizeof(struct dlm_proxy_ast));
 	past.node_idx = dlm->node_num;
 	past.type = msg_type;
 	past.blocked_type = blocked_type;
@@ -411,7 +413,7 @@
 	memcpy(past.name, res->lockname.name, past.namelen);
 	past.cookie = lock->ml.cookie;
 
-	iov[0].iov_len = sizeof(dlm_proxy_ast);
+	iov[0].iov_len = sizeof(struct dlm_proxy_ast);
 	iov[0].iov_base = &past;
 	if (flags & DLM_LKSB_GET_LVB) {
 		mlog(0, "returning requested LVB data\n");
@@ -429,7 +431,7 @@
 	else {
 		if (status == DLM_RECOVERING) {
 			mlog(ML_ERROR, "sent AST to node %u, it thinks this "
-			     "node is dead!\n", lock->ml.node); 
+			     "node is dead!\n", lock->ml.node);
 			BUG();
 		} else if (status == DLM_MIGRATING) {
 			mlog(ML_ERROR, "sent AST to node %u, it returned "

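The list_entry() conversions above deserve a note: list_entry() is just
container_of(), so its middle argument must be a real type name that
offsetof() can operate on, which is why every call site gains the
struct keyword.  A self-contained sketch of the idiom (struct and
fields invented for illustration):

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/types.h>

struct demo_lock {
	u64 cookie;
	struct list_head list;	/* links the lock onto a lockres queue */
};

static struct demo_lock *find_by_cookie(struct list_head *head, u64 cookie)
{
	struct list_head *iter;
	struct demo_lock *lock;

	list_for_each(iter, head) {
		/* back up from the embedded list_head to the
		 * enclosing struct demo_lock */
		lock = list_entry(iter, struct demo_lock, list);
		if (lock->cookie == cookie)
			return lock;
	}
	return NULL;
}
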
Modified: trunk/fs/ocfs2/dlm/dlmcommon.h
===================================================================
--- trunk/fs/ocfs2/dlm/dlmcommon.h	2005-06-29 22:47:38 UTC (rev 2447)
+++ trunk/fs/ocfs2/dlm/dlmcommon.h	2005-06-30 00:30:29 UTC (rev 2448)
@@ -28,7 +28,7 @@
 #include <linux/kref.h>
 
 #define DLM_HB_NODE_DOWN_PRI     (0xf000000)
-#define DLM_HB_NODE_UP_PRI       (0x8000000)  
+#define DLM_HB_NODE_UP_PRI       (0x8000000)
 
 #define DLM_LOCKID_NAME_MAX    32
 
@@ -41,11 +41,11 @@
 #define DLM_HASH_SIZE     (1 << DLM_HASH_BITS)
 #define DLM_HASH_MASK     (DLM_HASH_SIZE - 1)
 
-typedef enum _dlm_ast_type {
+enum dlm_ast_type {
 	DLM_AST = 0,
 	DLM_BAST,
 	DLM_ASTUNLOCK
-} dlm_ast_type;
+};
 
 
 #define LKM_VALID_FLAGS (LKM_VALBLK | LKM_CONVERT | LKM_UNLOCK | \
@@ -65,26 +65,26 @@
 
 #define DLM_RECO_STATE_ACTIVE  0x0001
 
-typedef struct _dlm_recovery_ctxt
+struct dlm_recovery_ctxt
 {
 	struct list_head resources;
-	struct list_head received;   // list of dlm_reco_lock_infos received from other nodes during recovery
+	struct list_head received;
 	struct list_head node_data;
 	u8  new_master;
 	u8  dead_node;
 	u16 state;
 	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
 	wait_queue_head_t event;
-} dlm_recovery_ctxt;
+};
 
-typedef enum _dlm_ctxt_state {
+enum dlm_ctxt_state {
 	DLM_CTXT_NEW = 0,
 	DLM_CTXT_JOINED,
 	DLM_CTXT_IN_SHUTDOWN,
 	DLM_CTXT_LEAVING,
-} dlm_ctxt_state;
+};
 
-struct _dlm_ctxt
+struct dlm_ctxt
 {
 	struct list_head list;
 	struct list_head *resources;
@@ -95,7 +95,6 @@
 	unsigned int purge_count;
 	spinlock_t spinlock;
 	spinlock_t ast_lock;
-	struct rw_semaphore recovery_sem;
 	char *name;
 	u8 node_num;
 	u32 key;
@@ -104,7 +103,7 @@
 	unsigned long live_nodes_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
 	unsigned long domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
 	unsigned long recovery_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
-	dlm_recovery_ctxt reco;
+	struct dlm_recovery_ctxt reco;
 	spinlock_t master_lock;
 	struct list_head master_list;
 	struct list_head mle_hb_events;
@@ -116,7 +115,7 @@
 
 	/* NOTE: Next three are protected by dlm_domain_lock */
 	struct kref dlm_refs;
-	dlm_ctxt_state dlm_state;
+	enum dlm_ctxt_state dlm_state;
 	unsigned int num_joins;
 
 	struct o2hb_callback_func dlm_hb_up;
@@ -129,59 +128,60 @@
 	struct work_struct dispatched_work;
 	struct list_head work_list;
 	spinlock_t work_lock;
-	struct list_head	dlm_domain_handlers;
+	struct list_head dlm_domain_handlers;
 	struct list_head	dlm_eviction_callbacks;
 };
 
-/* these keventd work queue items are for less-frequently 
+/* these keventd work queue items are for less-frequently
  * called functions that cannot be directly called from the
  * net message handlers for some reason, usually because
  * they need to send net messages of their own. */
 void dlm_dispatch_work(void *data);
 
-typedef struct _dlm_lock_resource dlm_lock_resource;
-typedef struct _dlm_work_item dlm_work_item;
+struct dlm_lock_resource;
+struct dlm_work_item;
 
-typedef void (dlm_workfunc_t)(dlm_work_item *, void *);
+typedef void (dlm_workfunc_t)(struct dlm_work_item *, void *);
 
-typedef struct _dlm_request_all_locks_priv
+struct dlm_request_all_locks_priv
 {
 	u8 reco_master;
 	u8 dead_node;
-} dlm_request_all_locks_priv;
+};
 
-typedef struct _dlm_mig_lockres_priv
+struct dlm_mig_lockres_priv
 {
-	dlm_lock_resource *lockres;
+	struct dlm_lock_resource *lockres;
 	u8 real_master;
-} dlm_mig_lockres_priv;
+};
 
-typedef struct _dlm_assert_master_priv
+struct dlm_assert_master_priv
 {
-	dlm_lock_resource *lockres;
+	struct dlm_lock_resource *lockres;
 	u8 request_from;
 	u32 flags;
 	unsigned ignore_higher:1;
-} dlm_assert_master_priv;
+};
 
 
-struct _dlm_work_item 
+struct dlm_work_item
 {
 	struct list_head list;
 	dlm_workfunc_t *func;
-	dlm_ctxt *dlm;
+	struct dlm_ctxt *dlm;
 	void *data;
 	union {
-		dlm_request_all_locks_priv ral;
-		dlm_mig_lockres_priv ml;
-		dlm_assert_master_priv am;
+		struct dlm_request_all_locks_priv ral;
+		struct dlm_mig_lockres_priv ml;
+		struct dlm_assert_master_priv am;
 	} u;
 };
 
-static inline void dlm_init_work_item(dlm_ctxt *dlm, dlm_work_item *i, 
+static inline void dlm_init_work_item(struct dlm_ctxt *dlm,
+				      struct dlm_work_item *i,
 				      dlm_workfunc_t *f, void *data)
 {
-	memset(i, 0, sizeof(dlm_work_item));
+	memset(i, 0, sizeof(*i));
 	i->func = f;
 	INIT_LIST_HEAD(&i->list);
 	i->data = data;
@@ -190,7 +190,7 @@
 
 
 
-static inline void __dlm_set_joining_node(struct _dlm_ctxt *dlm,
+static inline void __dlm_set_joining_node(struct dlm_ctxt *dlm,
 					  u8 node)
 {
 	assert_spin_locked(&dlm->spinlock);
@@ -203,19 +203,19 @@
 #define DLM_LOCK_RES_RECOVERING           0x00000002
 #define DLM_LOCK_RES_READY                0x00000004
 #define DLM_LOCK_RES_DIRTY                0x00000008
-#define DLM_LOCK_RES_IN_PROGRESS          0x00000010 
+#define DLM_LOCK_RES_IN_PROGRESS          0x00000010
 #define DLM_LOCK_RES_MIGRATING            0x00000020
 
 #define DLM_PURGE_INTERVAL_MS   (8 * 1000)
 
-struct _dlm_lock_resource
+struct dlm_lock_resource
 {
 	/* WARNING: Please see the comment in dlm_init_lockres before
 	 * adding fields here. */
 	struct list_head list;
 	struct kref      refs;
 
-	/* please keep these next 3 in this order 
+	/* please keep these next 3 in this order
 	 * some funcs want to iterate over all lists */
 	struct list_head granted;
 	struct list_head converting;
@@ -239,38 +239,38 @@
 	char lvb[DLM_LVB_LEN];
 };
 
-typedef struct _dlm_migratable_lock
+struct dlm_migratable_lock
 {
 	u64 cookie;
 
-	/* these 3 are just padding for the in-memory structure, but 
-	 * list and flags are actually used when sent over the wire */ 
+	/* these 3 are just padding for the in-memory structure, but
+	 * list and flags are actually used when sent over the wire */
 	u16 pad1;
 	u8 list;  // 0=granted, 1=converting, 2=blocked
-	u8 flags; 
+	u8 flags;
 
 	s8 type;
 	s8 convert_type;
 	s8 highest_blocked;
 	u8 node;
-} dlm_migratable_lock;  // 16 bytes
+};  // 16 bytes
 
-struct _dlm_lock
+struct dlm_lock
 {
-	dlm_migratable_lock ml;
+	struct dlm_migratable_lock ml;
 
 	struct list_head list;
 	struct list_head ast_list;
 	struct list_head bast_list;
-	dlm_lock_resource *lockres;
+	struct dlm_lock_resource *lockres;
 	spinlock_t spinlock;
 	struct kref lock_refs;
 
 	// ast and bast must be callable while holding a spinlock!
-	dlm_astlockfunc_t *ast;     
+	dlm_astlockfunc_t *ast;
 	dlm_bastlockfunc_t *bast;
 	void *astdata;
-	dlm_lockstatus *lksb;
+	struct dlm_lockstatus *lksb;
 	unsigned ast_pending:1,
 		 bast_pending:1,
 		 convert_pending:1,
@@ -280,7 +280,8 @@
 };
 
 
-#define DLM_LKSB_KERNEL_ALLOCATED  0x01  // allocated on master node on behalf of remote node
+#define DLM_LKSB_KERNEL_ALLOCATED  0x01  /* allocated on master node on
+					    behalf of remote node */
 #define DLM_LKSB_PUT_LVB           0x02
 #define DLM_LKSB_GET_LVB           0x04
 #define DLM_LKSB_UNUSED2           0x08
@@ -296,8 +297,8 @@
 	DLM_BLOCKED_LIST
 };
 
-static inline struct list_head * dlm_list_idx_to_ptr(dlm_lock_resource *res,
-						     enum dlm_lockres_list idx)
+static inline struct list_head *
+dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx)
 {
 	struct list_head *ret = NULL;
 	if (idx == DLM_GRANTED_LIST)
@@ -318,18 +319,18 @@
 	DLM_MLE_MIGRATION
 };
 
-typedef struct _dlm_lock_name
+struct dlm_lock_name
 {
 	u8 len;
 	u8 name[DLM_LOCKID_NAME_MAX];
-} dlm_lock_name;
+};
 
 /* good god this needs to be trimmed down */
-typedef struct _dlm_master_list_entry
+struct dlm_master_list_entry
 {
 	struct list_head list;
 	struct list_head hb_events;
-	dlm_ctxt *dlm;
+	struct dlm_ctxt *dlm;
 	spinlock_t spinlock;
 	wait_queue_head_t wq;
 	atomic_t woken;
@@ -340,49 +341,51 @@
 	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
 	u8 master;
 	u8 new_master;
-	enum dlm_mle_type type;    // BLOCK or MASTER
+	enum dlm_mle_type type;
 	struct o2hb_callback_func mle_hb_up;
 	struct o2hb_callback_func mle_hb_down;
 	union {
-		dlm_lock_resource *res;
-		dlm_lock_name name;
+		struct dlm_lock_resource *res;
+		struct dlm_lock_name name;
 	} u;
-} dlm_master_list_entry;
+};
 
-typedef struct _dlm_node_iter
+struct dlm_node_iter
 {
 	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
 	int curnode;
-} dlm_node_iter;
+};
 
 
-#define DLM_MASTER_REQUEST_MSG  	500
-#define DLM_UNUSED_MSG1            	501
-#define DLM_ASSERT_MASTER_MSG		502
-#define DLM_CREATE_LOCK_MSG		503
-#define DLM_CONVERT_LOCK_MSG		504
-#define DLM_PROXY_AST_MSG		505
-#define DLM_UNLOCK_LOCK_MSG		506
-#define DLM_UNUSED_MSG2			507
-#define DLM_MIGRATE_REQUEST_MSG		508
-#define DLM_MIG_LOCKRES_MSG 		509
-#define DLM_QUERY_JOIN_MSG		510
-#define DLM_ASSERT_JOINED_MSG		511
-#define DLM_CANCEL_JOIN_MSG		512
-#define DLM_EXIT_DOMAIN_MSG		513
-#define DLM_MASTER_REQUERY_MSG		514
-#define DLM_LOCK_REQUEST_MSG		515
-#define DLM_RECO_DATA_DONE_MSG		516
-#define DLM_BEGIN_RECO_MSG		517
-#define DLM_FINALIZE_RECO_MSG		518
+enum {
+	DLM_MASTER_REQUEST_MSG    = 500,
+	DLM_UNUSED_MSG1,         /* 501 */
+	DLM_ASSERT_MASTER_MSG,	 /* 502 */
+	DLM_CREATE_LOCK_MSG,	 /* 503 */
+	DLM_CONVERT_LOCK_MSG,	 /* 504 */
+	DLM_PROXY_AST_MSG,	 /* 505 */
+	DLM_UNLOCK_LOCK_MSG,	 /* 506 */
+	DLM_UNUSED_MSG2,	 /* 507 */
+	DLM_MIGRATE_REQUEST_MSG, /* 508 */
+	DLM_MIG_LOCKRES_MSG, 	 /* 509 */
+	DLM_QUERY_JOIN_MSG,	 /* 510 */
+	DLM_ASSERT_JOINED_MSG,	 /* 511 */
+	DLM_CANCEL_JOIN_MSG,	 /* 512 */
+	DLM_EXIT_DOMAIN_MSG,	 /* 513 */
+	DLM_MASTER_REQUERY_MSG,	 /* 514 */
+	DLM_LOCK_REQUEST_MSG,	 /* 515 */
+	DLM_RECO_DATA_DONE_MSG,	 /* 516 */
+	DLM_BEGIN_RECO_MSG,	 /* 517 */
+	DLM_FINALIZE_RECO_MSG	 /* 518 */
+};
 
 
-typedef struct _dlm_reco_node_data
+struct dlm_reco_node_data
 {
 	int state;
 	u8 node_num;
 	struct list_head list;
-} dlm_reco_node_data;
+};
 
 enum {
 	DLM_RECO_NODE_DATA_DEAD = -1,
@@ -396,14 +399,14 @@
 
 
 enum {
-	DLM_MASTER_RESP_NO,
+	DLM_MASTER_RESP_NO = 0,
 	DLM_MASTER_RESP_YES,
 	DLM_MASTER_RESP_MAYBE,
 	DLM_MASTER_RESP_ERROR
 };
 
 
-typedef struct _dlm_master_request
+struct dlm_master_request
 {
 	u8 node_idx;
 	u8 namelen;
@@ -411,12 +414,12 @@
 	u32 flags;
 
 	u8 name[O2NM_MAX_NAME_LEN];
-} dlm_master_request;
+};
 
 #define DLM_ASSERT_MASTER_MLE_CLEANUP      0x00000001
 #define DLM_ASSERT_MASTER_REQUERY          0x00000002
 #define DLM_ASSERT_MASTER_FINISH_MIGRATION 0x00000004
-typedef struct _dlm_assert_master
+struct dlm_assert_master
 {
 	u8 node_idx;
 	u8 namelen;
@@ -424,9 +427,9 @@
 	u32 flags;
 
 	u8 name[O2NM_MAX_NAME_LEN];
-} dlm_assert_master;
+};
 
-typedef struct _dlm_migrate_request
+struct dlm_migrate_request
 {
 	u8 master;
 	u8 new_master;
@@ -434,9 +437,9 @@
 	u8 pad1;
 	u32 pad2;
 	u8 name[O2NM_MAX_NAME_LEN];
-} dlm_migrate_request;
+};
 
-typedef struct _dlm_master_requery
+struct dlm_master_requery
 {
 	u8 pad1;
 	u8 pad2;
@@ -444,20 +447,20 @@
 	u8 namelen;
 	u32 pad3;
 	u8 name[O2NM_MAX_NAME_LEN];
-} dlm_master_requery;
+};
 
 #define DLM_MRES_RECOVERY   0x01
 #define DLM_MRES_MIGRATION  0x02
 #define DLM_MRES_ALL_DONE   0x04
 
 /*
- * We would like to get one whole lockres into a single network 
+ * We would like to get one whole lockres into a single network
  * message whenever possible.  Generally speaking, there will be
  * at most one dlm_lock on a lockres for each node in the cluster,
  * plus (infrequently) any additional locks coming in from userdlm.
  *
- * struct _dlm_lockres_page 
- * { 
+ * struct _dlm_lockres_page
+ * {
  * 	dlm_migratable_lockres mres;
  * 	dlm_migratable_lock ml[DLM_MAX_MIGRATABLE_LOCKS];
  * 	u8 pad[DLM_MIG_LOCKRES_RESERVED];
@@ -469,20 +472,20 @@
  * and sizeof(dlm_migratable_lockres) = 112 bytes
  * and sizeof(dlm_migratable_lock) = 16 bytes
  *
- * Choosing DLM_MAX_MIGRATABLE_LOCKS=240 and 
+ * Choosing DLM_MAX_MIGRATABLE_LOCKS=240 and
  * DLM_MIG_LOCKRES_RESERVED=128 means we have this:
  *
- *  (DLM_MAX_MIGRATABLE_LOCKS * sizeof(dlm_migratable_lock)) + 
+ *  (DLM_MAX_MIGRATABLE_LOCKS * sizeof(dlm_migratable_lock)) +
  *     sizeof(dlm_migratable_lockres) + DLM_MIG_LOCKRES_RESERVED =
  *        NET_MAX_PAYLOAD_BYTES
  *  (240 * 16) + 112 + 128 = 4080
  *
- * So a lockres would need more than 240 locks before it would 
+ * So a lockres would need more than 240 locks before it would
  * use more than one network packet to recover.  Not too bad.
  */
-#define DLM_MAX_MIGRATABLE_LOCKS   240 
+#define DLM_MAX_MIGRATABLE_LOCKS   240
 
-typedef struct _dlm_migratable_lockres
+struct dlm_migratable_lockres
 {
 	u8 master;
 	u8 lockname_len;
@@ -492,22 +495,23 @@
 	u64 mig_cookie;  // cookie for this lockres migration
 			 // or zero if not needed
 	// 16 bytes
-	u8 lockname[DLM_LOCKID_NAME_MAX];   
+	u8 lockname[DLM_LOCKID_NAME_MAX];
 	// 48 bytes
-	u8 lvb[DLM_LVB_LEN];                
+	u8 lvb[DLM_LVB_LEN];
 	// 112 bytes
-	dlm_migratable_lock ml[0];  // 16 bytes each, begins at byte 112
-} dlm_migratable_lockres;
-#define DLM_MIG_LOCKRES_MAX_LEN  (sizeof(dlm_migratable_lockres) + \
-				(sizeof(dlm_migratable_lock) * \
-				 DLM_MAX_MIGRATABLE_LOCKS) )
+	struct dlm_migratable_lock ml[0];  // 16 bytes each, begins at byte 112
+};
+#define DLM_MIG_LOCKRES_MAX_LEN  \
+	(sizeof(struct dlm_migratable_lockres) + \
+	 (sizeof(struct dlm_migratable_lock) * \
+	  DLM_MAX_MIGRATABLE_LOCKS) )
 
-/* from above, 128 bytes 
+/* from above, 128 bytes
  * for some undetermined future use */
 #define DLM_MIG_LOCKRES_RESERVED   (NET_MAX_PAYLOAD_BYTES - \
 				    DLM_MIG_LOCKRES_MAX_LEN)
 
-typedef struct _dlm_create_lock
+struct dlm_create_lock
 {
 	u64 cookie;
 
@@ -518,9 +522,9 @@
 	u8 namelen;
 
 	u8 name[O2NM_MAX_NAME_LEN];
-} dlm_create_lock;
+};
 
-typedef struct _dlm_convert_lock
+struct dlm_convert_lock
 {
 	u64 cookie;
 
@@ -533,10 +537,10 @@
 	u8 name[O2NM_MAX_NAME_LEN];
 
 	s8 lvb[0];
-} dlm_convert_lock;
-#define DLM_CONVERT_LOCK_MAX_LEN  (sizeof(dlm_convert_lock) + DLM_LVB_LEN)
+};
+#define DLM_CONVERT_LOCK_MAX_LEN  (sizeof(struct dlm_convert_lock)+DLM_LVB_LEN)
 
-typedef struct _dlm_unlock_lock
+struct dlm_unlock_lock
 {
 	u64 cookie;
 
@@ -548,10 +552,10 @@
 	u8 name[O2NM_MAX_NAME_LEN];
 
 	s8 lvb[0];
-} dlm_unlock_lock;
-#define DLM_UNLOCK_LOCK_MAX_LEN  (sizeof(dlm_unlock_lock) + DLM_LVB_LEN)
+};
+#define DLM_UNLOCK_LOCK_MAX_LEN  (sizeof(struct dlm_unlock_lock)+DLM_LVB_LEN)
 
-typedef struct _dlm_proxy_ast
+struct dlm_proxy_ast
 {
 	u64 cookie;
 
@@ -564,8 +568,8 @@
 	u8 name[O2NM_MAX_NAME_LEN];
 
 	s8 lvb[0];
-} dlm_proxy_ast;
-#define DLM_PROXY_AST_MAX_LEN  (sizeof(dlm_proxy_ast) + DLM_LVB_LEN)
+};
+#define DLM_PROXY_AST_MAX_LEN  (sizeof(struct dlm_proxy_ast)+DLM_LVB_LEN)
 
 #define DLM_MOD_KEY (0x666c6172)
 enum dlm_query_join_response {
@@ -574,15 +578,15 @@
 	JOIN_OK_NO_MAP,
 };
 
-typedef struct _dlm_lock_request
+struct dlm_lock_request
 {
 	u8 node_idx;
 	u8 dead_node;
 	u16 pad1;
 	u32 pad2;
-} dlm_lock_request;
+};
 
-typedef struct _dlm_reco_data_done
+struct dlm_reco_data_done
 {
 	u8 node_idx;
 	u8 dead_node;
@@ -590,227 +594,231 @@
 	u32 pad2;
 
 	/* unused for now */
-	/* eventually we can use this to attempt 
+	/* eventually we can use this to attempt
 	 * lvb recovery based on each node's info */
 	u8 reco_lvb[DLM_LVB_LEN];
-} dlm_reco_data_done;
+};
 
-typedef struct _dlm_begin_reco
+struct dlm_begin_reco
 {
 	u8 node_idx;
 	u8 dead_node;
 	u16 pad1;
 	u32 pad2;
-} dlm_begin_reco;
+};
 
 
-typedef struct _dlm_query_join_request
+struct dlm_query_join_request
 {
 	u8 node_idx;
 	u8 pad1[2];
 	u8 name_len;
 	u8 domain[O2NM_MAX_NAME_LEN];
-} dlm_query_join_request;
+};
 
-typedef struct _dlm_assert_joined
+struct dlm_assert_joined
 {
 	u8 node_idx;
 	u8 pad1[2];
 	u8 name_len;
 	u8 domain[O2NM_MAX_NAME_LEN];
-} dlm_assert_joined;
+};
 
-typedef struct _dlm_cancel_join
+struct dlm_cancel_join
 {
 	u8 node_idx;
 	u8 pad1[2];
 	u8 name_len;
 	u8 domain[O2NM_MAX_NAME_LEN];
-} dlm_cancel_join;
+};
 
-typedef struct _dlm_exit_domain
+struct dlm_exit_domain
 {
 	u8 node_idx;
 	u8 pad1[3];
-} dlm_exit_domain;
+};
 
-typedef struct _dlm_finalize_reco
+struct dlm_finalize_reco
 {
 	u8 node_idx;
 	u8 dead_node;
 	u16 pad1;
 	u32 pad2;
-} dlm_finalize_reco;
+};
 
 
-static inline void dlm_query_join_request_to_net(dlm_query_join_request *m)
+static inline void
+dlm_query_join_request_to_net(struct dlm_query_join_request *m)
 {
 	/* do nothing */
 }
-static inline void dlm_query_join_request_to_host(dlm_query_join_request *m)
+static inline void
+dlm_query_join_request_to_host(struct dlm_query_join_request *m)
 {
 	/* do nothing */
 }
-static inline void dlm_assert_joined_to_net(dlm_assert_joined *m)
+static inline void dlm_assert_joined_to_net(struct dlm_assert_joined *m)
 {
 	/* do nothing */
 }
-static inline void dlm_assert_joined_to_host(dlm_assert_joined *m)
+static inline void dlm_assert_joined_to_host(struct dlm_assert_joined *m)
 {
 	/* do nothing */
 }
-static inline void dlm_cancel_join_to_net(dlm_cancel_join *m)
+static inline void dlm_cancel_join_to_net(struct dlm_cancel_join *m)
 {
 	/* do nothing */
 }
-static inline void dlm_cancel_join_to_host(dlm_cancel_join *m)
+static inline void dlm_cancel_join_to_host(struct dlm_cancel_join *m)
 {
 	/* do nothing */
 }
-static inline void dlm_exit_domin_to_net(dlm_exit_domain *m)
+static inline void dlm_exit_domin_to_net(struct dlm_exit_domain *m)
 {
 	/* do nothing */
 }
-static inline void dlm_exit_domain_to_host(dlm_exit_domain *m)
+static inline void dlm_exit_domain_to_host(struct dlm_exit_domain *m)
 {
 	/* do nothing */
 }
-static inline void dlm_master_request_to_net(dlm_master_request *m)
+static inline void dlm_master_request_to_net(struct dlm_master_request *m)
 {
 	m->flags = htonl(m->flags);
 }
-static inline void dlm_master_request_to_host(dlm_master_request *m)
+static inline void dlm_master_request_to_host(struct dlm_master_request *m)
 {
 	m->flags = ntohl(m->flags);
 }
 
-static inline void dlm_assert_master_to_net(dlm_assert_master *m)
+static inline void dlm_assert_master_to_net(struct dlm_assert_master *m)
 {
 	m->flags = htonl(m->flags);
 }
-static inline void dlm_assert_master_to_host(dlm_assert_master *m)
+static inline void dlm_assert_master_to_host(struct dlm_assert_master *m)
 {
 	m->flags = ntohl(m->flags);
 }
 
-static inline void dlm_migrate_request_to_net(dlm_migrate_request *m)
+static inline void dlm_migrate_request_to_net(struct dlm_migrate_request *m)
 {
 	/* do nothing */
 }
-static inline void dlm_migrate_request_to_host(dlm_migrate_request *m)
+static inline void dlm_migrate_request_to_host(struct dlm_migrate_request *m)
 {
 	/* do nothing */
 }
 
-static inline void dlm_master_requery_to_net(dlm_master_requery *m)
+static inline void dlm_master_requery_to_net(struct dlm_master_requery *m)
 {
 	/* do nothing */
 }
-static inline void dlm_master_requery_to_host(dlm_master_requery *m)
+static inline void dlm_master_requery_to_host(struct dlm_master_requery *m)
 {
 	/* do nothing */
 }
 
-static inline void dlm_create_lock_to_net(dlm_create_lock *c)
+static inline void dlm_create_lock_to_net(struct dlm_create_lock *c)
 {
 	c->cookie = cpu_to_be64(c->cookie);
 	c->flags = htonl(c->flags);
 }
-static inline void dlm_create_lock_to_host(dlm_create_lock *c)
+static inline void dlm_create_lock_to_host(struct dlm_create_lock *c)
 {
 	c->cookie = be64_to_cpu(c->cookie);
 	c->flags = ntohl(c->flags);
 }
 
-static inline void dlm_convert_lock_to_net(dlm_convert_lock *c)
+static inline void dlm_convert_lock_to_net(struct dlm_convert_lock *c)
 {
 	c->cookie = cpu_to_be64(c->cookie);
 	c->flags = htonl(c->flags);
 }
-static inline void dlm_convert_lock_to_host(dlm_convert_lock *c)
+static inline void dlm_convert_lock_to_host(struct dlm_convert_lock *c)
 {
 	c->cookie = be64_to_cpu(c->cookie);
 	c->flags = ntohl(c->flags);
 }
 
-static inline void dlm_unlock_lock_to_net(dlm_unlock_lock *u)
+static inline void dlm_unlock_lock_to_net(struct dlm_unlock_lock *u)
 {
 	u->cookie = cpu_to_be64(u->cookie);
 	u->flags = htonl(u->flags);
 }
-static inline void dlm_unlock_lock_to_host(dlm_unlock_lock *u)
+static inline void dlm_unlock_lock_to_host(struct dlm_unlock_lock *u)
 {
 	u->cookie = be64_to_cpu(u->cookie);
 	u->flags = ntohl(u->flags);
 }
 
-static inline void dlm_proxy_ast_to_net(dlm_proxy_ast *a)
+static inline void dlm_proxy_ast_to_net(struct dlm_proxy_ast *a)
 {
 	a->cookie = cpu_to_be64(a->cookie);
 	a->flags = htonl(a->flags);
 }
-static inline void dlm_proxy_ast_to_host(dlm_proxy_ast *a)
+static inline void dlm_proxy_ast_to_host(struct dlm_proxy_ast *a)
 {
 	a->cookie = be64_to_cpu(a->cookie);
 	a->flags = ntohl(a->flags);
 }
-static inline void dlm_migratable_lock_to_net(dlm_migratable_lock *ml)
+static inline void dlm_migratable_lock_to_net(struct dlm_migratable_lock *ml)
 {
 	ml->cookie = cpu_to_be64(ml->cookie);
 }
-static inline void dlm_migratable_lock_to_host(dlm_migratable_lock *ml)
+static inline void dlm_migratable_lock_to_host(struct dlm_migratable_lock *ml)
 {
 	ml->cookie = be64_to_cpu(ml->cookie);
 }
-static inline void dlm_lock_request_to_net(dlm_lock_request *r)
+static inline void dlm_lock_request_to_net(struct dlm_lock_request *r)
 {
 	/* do nothing */
 }
-static inline void dlm_lock_request_to_host(dlm_lock_request *r)
+static inline void dlm_lock_request_to_host(struct dlm_lock_request *r)
 {
 	/* do nothing */
 }
-static inline void dlm_reco_data_done_to_net(dlm_reco_data_done *r)
+static inline void dlm_reco_data_done_to_net(struct dlm_reco_data_done *r)
 {
 	/* do nothing */
 }
-static inline void dlm_reco_data_done_to_host(dlm_reco_data_done *r)
+static inline void dlm_reco_data_done_to_host(struct dlm_reco_data_done *r)
 {
 	/* do nothing */
 }
 
-static inline void dlm_begin_reco_to_net(dlm_begin_reco *r)
+static inline void dlm_begin_reco_to_net(struct dlm_begin_reco *r)
 {
 	/* do nothing */
 }
-static inline void dlm_begin_reco_to_host(dlm_begin_reco *r)
+static inline void dlm_begin_reco_to_host(struct dlm_begin_reco *r)
 {
 	/* do nothing */
 }
-static inline void dlm_finalize_reco_to_net(dlm_finalize_reco *f)
+static inline void dlm_finalize_reco_to_net(struct dlm_finalize_reco *f)
 {
 	/* do nothing */
 }
-static inline void dlm_finalize_reco_to_host(dlm_finalize_reco *f)
+static inline void dlm_finalize_reco_to_host(struct dlm_finalize_reco *f)
 {
 	/* do nothing */
 }
 
-static inline void dlm_migratable_lockres_to_net(dlm_migratable_lockres *mr)
+static inline void
+dlm_migratable_lockres_to_net(struct dlm_migratable_lockres *mr)
 {
 	int i, nr = mr->total_locks;
-	
+
 	BUG_ON(nr < 0);
 	BUG_ON(nr > DLM_MAX_MIGRATABLE_LOCKS);
-	
+
 	mr->total_locks = htonl(mr->total_locks);
 	mr->mig_cookie = cpu_to_be64(mr->mig_cookie);
-	
+
 	for (i=0; i<nr; i++)
 		dlm_migratable_lock_to_net(&(mr->ml[i]));
 }
 
-static inline void dlm_migratable_lockres_to_host(dlm_migratable_lockres *mr)
+static inline void
+dlm_migratable_lockres_to_host(struct dlm_migratable_lockres *mr)
 {
 	int i, nr;
 
@@ -825,9 +833,10 @@
 		dlm_migratable_lock_to_host(&(mr->ml[i]));
 }
 
-static inline dlm_status __dlm_lockres_state_to_status(dlm_lock_resource *res)
+static inline enum dlm_status
+__dlm_lockres_state_to_status(struct dlm_lock_resource *res)
 {
-	dlm_status status = DLM_NORMAL;
+	enum dlm_status status = DLM_NORMAL;
 
 	assert_spin_locked(&res->spinlock);
 
@@ -841,108 +850,134 @@
 	return status;
 }
 
-dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie, dlm_lockstatus *lksb);
-void dlm_lock_get(dlm_lock *lock);
-void dlm_lock_put(dlm_lock *lock);
+struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
+			       struct dlm_lockstatus *lksb);
+void dlm_lock_get(struct dlm_lock *lock);
+void dlm_lock_put(struct dlm_lock *lock);
 
-void dlm_lock_detach_lockres(dlm_lock *lock);
-void dlm_lock_attach_lockres(dlm_lock *lock, dlm_lock_resource *res);
-	
+void dlm_lock_detach_lockres(struct dlm_lock *lock);
+void dlm_lock_attach_lockres(struct dlm_lock *lock,
+			     struct dlm_lock_resource *res);
+
 int dlm_create_lock_handler(o2net_msg *msg, u32 len, void *data);
 int dlm_convert_lock_handler(o2net_msg *msg, u32 len, void *data);
 int dlm_proxy_ast_handler(o2net_msg *msg, u32 len, void *data);
 
-void dlm_revert_pending_convert(dlm_lock_resource *res, dlm_lock *lock);
-void dlm_revert_pending_lock(dlm_lock_resource *res, dlm_lock *lock);
+void dlm_revert_pending_convert(struct dlm_lock_resource *res,
+				struct dlm_lock *lock);
+void dlm_revert_pending_lock(struct dlm_lock_resource *res,
+			     struct dlm_lock *lock);
 
 int dlm_unlock_lock_handler(o2net_msg *msg, u32 len, void *data);
-void dlm_commit_pending_cancel(dlm_lock_resource *res, dlm_lock *lock);
-void dlm_commit_pending_unlock(dlm_lock_resource *res, dlm_lock *lock);
+void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
+			       struct dlm_lock *lock);
+void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
+			       struct dlm_lock *lock);
 
-void dlm_shuffle_lists(dlm_ctxt *dlm, dlm_lock_resource *res);
-int dlm_launch_thread(dlm_ctxt *dlm);
-void dlm_complete_thread(dlm_ctxt *dlm);
-void dlm_flush_asts(dlm_ctxt *dlm);
-int dlm_flush_lockres_asts(dlm_ctxt *dlm, dlm_lock_resource *res);
-int dlm_launch_recovery_thread(dlm_ctxt *dlm);
-void dlm_complete_recovery_thread(dlm_ctxt *dlm);
-void dlm_wait_for_recovery(dlm_ctxt *dlm);
+void dlm_shuffle_lists(struct dlm_ctxt *dlm,
+		       struct dlm_lock_resource *res);
+int dlm_launch_thread(struct dlm_ctxt *dlm);
+void dlm_complete_thread(struct dlm_ctxt *dlm);
+void dlm_flush_asts(struct dlm_ctxt *dlm);
+int dlm_flush_lockres_asts(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
+int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
+void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
+void dlm_wait_for_recovery(struct dlm_ctxt *dlm);
 
-void dlm_get(dlm_ctxt *dlm);
-void dlm_put(dlm_ctxt *dlm);
-dlm_ctxt *dlm_grab(dlm_ctxt *dlm);
-int dlm_domain_fully_joined(dlm_ctxt *dlm);
+void dlm_get(struct dlm_ctxt *dlm);
+void dlm_put(struct dlm_ctxt *dlm);
+struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm);
+int dlm_domain_fully_joined(struct dlm_ctxt *dlm);
 
-void dlm_lockres_calc_usage(dlm_ctxt *dlm,
-			    dlm_lock_resource *res);
-void dlm_purge_lockres(dlm_ctxt *dlm, dlm_lock_resource *lockres);
-void dlm_lockres_get(dlm_lock_resource *res);
-void dlm_lockres_put(dlm_lock_resource *res);
-void __dlm_unhash_lockres(dlm_lock_resource *res);
-void __dlm_insert_lockres(dlm_ctxt *dlm,
-		       dlm_lock_resource *res);
-dlm_lock_resource * __dlm_lookup_lockres(dlm_ctxt *dlm,
-				      const char *name,
-				      unsigned int len);
-dlm_lock_resource * dlm_lookup_lockres(dlm_ctxt *dlm,
-				    const char *name,
-				    unsigned int len);
+void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
+			    struct dlm_lock_resource *res);
+void dlm_purge_lockres(struct dlm_ctxt *dlm,
+		       struct dlm_lock_resource *lockres);
+void dlm_lockres_get(struct dlm_lock_resource *res);
+void dlm_lockres_put(struct dlm_lock_resource *res);
+void __dlm_unhash_lockres(struct dlm_lock_resource *res);
+void __dlm_insert_lockres(struct dlm_ctxt *dlm,
+			  struct dlm_lock_resource *res);
+struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
+						const char *name,
+						unsigned int len);
+struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
+					      const char *name,
+					      unsigned int len);
 
 int dlm_is_host_down(int errno);
-void dlm_change_lockres_owner(dlm_ctxt *dlm, dlm_lock_resource *res, u8 owner);
-dlm_lock_resource * dlm_get_lock_resource(dlm_ctxt *dlm, 
-					  const char *lockid,
-					  int flags);
-dlm_lock_resource *dlm_new_lockres(dlm_ctxt *dlm, 
-				   const char *name, 
-				   unsigned int namelen);
+void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
+			      struct dlm_lock_resource *res,
+			      u8 owner);
+struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
+						 const char *lockid,
+						 int flags);
+struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
+					  const char *name,
+					  unsigned int namelen);
 
-void __dlm_queue_ast(dlm_ctxt *dlm, dlm_lock *lock);
-void dlm_queue_ast(dlm_ctxt *dlm, dlm_lock *lock);
-void __dlm_queue_bast(dlm_ctxt *dlm, dlm_lock *lock);
-void dlm_queue_bast(dlm_ctxt *dlm, dlm_lock *lock);
-void dlm_do_local_ast(dlm_ctxt *dlm, dlm_lock_resource *res, dlm_lock *lock);
-int dlm_do_remote_ast(dlm_ctxt *dlm, dlm_lock_resource *res, dlm_lock *lock);
-void dlm_do_local_bast(dlm_ctxt *dlm, dlm_lock_resource *res, 
-		       dlm_lock *lock, int blocked_type);
-int dlm_send_proxy_ast_msg(dlm_ctxt *dlm, dlm_lock_resource *res, 
-			   dlm_lock *lock, int msg_type, 
+void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
+void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
+void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
+void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
+void dlm_do_local_ast(struct dlm_ctxt *dlm,
+		      struct dlm_lock_resource *res,
+		      struct dlm_lock *lock);
+int dlm_do_remote_ast(struct dlm_ctxt *dlm,
+		      struct dlm_lock_resource *res,
+		      struct dlm_lock *lock);
+void dlm_do_local_bast(struct dlm_ctxt *dlm,
+		       struct dlm_lock_resource *res,
+		       struct dlm_lock *lock,
+		       int blocked_type);
+int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm,
+			   struct dlm_lock_resource *res,
+			   struct dlm_lock *lock,
+			   int msg_type,
 			   int blocked_type, int flags);
-static inline int dlm_send_proxy_bast(dlm_ctxt *dlm, dlm_lock_resource *res, 
-				      dlm_lock *lock, int blocked_type)
+static inline int dlm_send_proxy_bast(struct dlm_ctxt *dlm,
+				      struct dlm_lock_resource *res,
+				      struct dlm_lock *lock,
+				      int blocked_type)
 {
 	return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_BAST,
 				      blocked_type, 0);
 }
 
-static inline int dlm_send_proxy_ast(dlm_ctxt *dlm, dlm_lock_resource *res, 
-				     dlm_lock *lock, int flags)
+static inline int dlm_send_proxy_ast(struct dlm_ctxt *dlm,
+				     struct dlm_lock_resource *res,
+				     struct dlm_lock *lock,
+				     int flags)
 {
 	return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_AST,
 				      0, flags);
 }
 
-void dlm_print_one_lock_resource(dlm_lock_resource *res);
-void __dlm_print_one_lock_resource(dlm_lock_resource *res);
+void dlm_print_one_lock_resource(struct dlm_lock_resource *res);
+void __dlm_print_one_lock_resource(struct dlm_lock_resource *res);
 
-u8 dlm_nm_this_node(dlm_ctxt *dlm);
-void dlm_kick_thread(dlm_ctxt *dlm, dlm_lock_resource *res);
-void __dlm_kick_thread(dlm_ctxt *dlm, dlm_lock_resource *res);
-void __dlm_dirty_lockres(dlm_ctxt *dlm, dlm_lock_resource *res);
-	
+u8 dlm_nm_this_node(struct dlm_ctxt *dlm);
+void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
+void __dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
+void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
 
-int dlm_nm_init(dlm_ctxt *dlm);
-int dlm_heartbeat_init(dlm_ctxt *dlm);
-void __dlm_hb_node_down(dlm_ctxt *dlm, int idx);
+
+int dlm_nm_init(struct dlm_ctxt *dlm);
+int dlm_heartbeat_init(struct dlm_ctxt *dlm);
+void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx);
 void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data);
 void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data);
-int dlm_hb_node_dead(dlm_ctxt *dlm, int node);
-int __dlm_hb_node_dead(dlm_ctxt *dlm, int node);
+int dlm_hb_node_dead(struct dlm_ctxt *dlm, int node);
+int __dlm_hb_node_dead(struct dlm_ctxt *dlm, int node);
 
-int dlm_migrate_lockres(dlm_ctxt *dlm, dlm_lock_resource *res, u8 target);
-int dlm_finish_migration(dlm_ctxt *dlm, dlm_lock_resource *res, u8 old_master);
-void dlm_lockres_release_ast(dlm_lock_resource *res);
-void __dlm_lockres_reserve_ast(dlm_lock_resource *res);
+int dlm_migrate_lockres(struct dlm_ctxt *dlm,
+			struct dlm_lock_resource *res,
+			u8 target);
+int dlm_finish_migration(struct dlm_ctxt *dlm,
+			 struct dlm_lock_resource *res,
+			 u8 old_master);
+void dlm_lockres_release_ast(struct dlm_lock_resource *res);
+void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res);
 
 int dlm_master_request_handler(o2net_msg *msg, u32 len, void *data);
 int dlm_assert_master_handler(o2net_msg *msg, u32 len, void *data);
@@ -954,26 +989,33 @@
 int dlm_begin_reco_handler(o2net_msg *msg, u32 len, void *data);
 int dlm_finalize_reco_handler(o2net_msg *msg, u32 len, void *data);
 
-int dlm_dispatch_assert_master(dlm_ctxt *dlm, dlm_lock_resource *res,
-			       int ignore_higher, u8 request_from, 
+int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
+			       struct dlm_lock_resource *res,
+			       int ignore_higher,
+			       u8 request_from,
 			       u32 flags);
-void dlm_assert_master_worker(dlm_work_item *item, void *data);
+void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
 
 
-int dlm_send_one_lockres(dlm_ctxt *dlm, dlm_lock_resource *res, 
-			 dlm_migratable_lockres *mres, 
-			 u8 send_to, u8 flags);
-void dlm_move_lockres_to_recovery_list(dlm_ctxt *dlm, dlm_lock_resource *res);
+int dlm_send_one_lockres(struct dlm_ctxt *dlm,
+			 struct dlm_lock_resource *res,
+			 struct dlm_migratable_lockres *mres,
+			 u8 send_to,
+			 u8 flags);
+void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
+				       struct dlm_lock_resource *res);
 
-void dlm_init_lockres(dlm_ctxt *dlm, dlm_lock_resource *res, 
-		      const char *name, unsigned int namelen);
+void dlm_init_lockres(struct dlm_ctxt *dlm,
+		      struct dlm_lock_resource *res,
+		      const char *name,
+		      unsigned int namelen);
 
 /* will exit holding res->spinlock, but may drop in function */
-void __dlm_wait_on_lockres_flags(dlm_lock_resource *res, int flags);
-void __dlm_wait_on_lockres_flags_set(dlm_lock_resource *res, int flags);
+void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags);
+void __dlm_wait_on_lockres_flags_set(struct dlm_lock_resource *res, int flags);
 
 /* will exit holding res->spinlock, but may drop in function */
-static inline void __dlm_wait_on_lockres(dlm_lock_resource *res)
+static inline void __dlm_wait_on_lockres(struct dlm_lock_resource *res)
 {
 	__dlm_wait_on_lockres_flags(res, (DLM_LOCK_RES_IN_PROGRESS|
 				    	  DLM_LOCK_RES_RECOVERING|
@@ -983,16 +1025,26 @@
 
 int dlm_init_mle_cache(void);
 void dlm_destroy_mle_cache(void);
-void dlm_mle_node_down(dlm_ctxt *dlm, dlm_master_list_entry *mle,
-		       struct o2nm_node *node, int idx);
-void dlm_mle_node_up(dlm_ctxt *dlm, dlm_master_list_entry *mle,
-		       struct o2nm_node *node, int idx);
-int dlm_do_assert_master(dlm_ctxt *dlm, const char *lockname, 
-			 unsigned int namelen, void *nodemap,
+void dlm_mle_node_down(struct dlm_ctxt *dlm,
+		       struct dlm_master_list_entry *mle,
+		       struct o2nm_node *node,
+		       int idx);
+void dlm_mle_node_up(struct dlm_ctxt *dlm,
+		     struct dlm_master_list_entry *mle,
+		     struct o2nm_node *node,
+		     int idx);
+int dlm_do_assert_master(struct dlm_ctxt *dlm,
+			 const char *lockname,
+			 unsigned int namelen,
+			 void *nodemap,
 			 u32 flags);
-int dlm_do_migrate_request(dlm_ctxt *dlm, dlm_lock_resource *res, 
-			   u8 master, u8 new_master, dlm_node_iter *iter);
-void dlm_clean_master_list(dlm_ctxt *dlm, u8 dead_node);
+int dlm_do_migrate_request(struct dlm_ctxt *dlm,
+			   struct dlm_lock_resource *res,
+			   u8 master,
+			   u8 new_master,
+			   struct dlm_node_iter *iter);
+void dlm_clean_master_list(struct dlm_ctxt *dlm,
+			   u8 dead_node);
 
 
 int dlm_dump_all_mles(const char __user *data, unsigned int len);
@@ -1030,25 +1082,26 @@
 	return 0;
 }
 
-static inline int dlm_lock_on_list(struct list_head *head, dlm_lock *lock)
+static inline int dlm_lock_on_list(struct list_head *head,
+				   struct dlm_lock *lock)
 {
 	struct list_head *iter;
-	dlm_lock *tmplock;
+	struct dlm_lock *tmplock;
 
 	list_for_each(iter, head) {
-		tmplock = list_entry(iter, dlm_lock, list);
+		tmplock = list_entry(iter, struct dlm_lock, list);
 		if (tmplock == lock)
 			return 1;
 	}
 	return 0;
 }
 
-static inline int dlm_mle_equal(dlm_ctxt *dlm,
-				dlm_master_list_entry *mle,
+static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
+				struct dlm_master_list_entry *mle,
 				const char *name,
 				unsigned int namelen)
 {
-	dlm_lock_resource *res;
+	struct dlm_lock_resource *res;
 
 	if (dlm != mle->dlm)
 		return 0;
@@ -1067,12 +1120,12 @@
 	return 1;
 }
 
-static inline dlm_status dlm_err_to_dlm_status(int err)
+static inline enum dlm_status dlm_err_to_dlm_status(int err)
 {
-	dlm_status ret;
+	enum dlm_status ret;
 	if (err == -ENOMEM)
 		ret = DLM_SYSERR;
-	else if (err == -ETIMEDOUT || o2net_link_down(err, NULL)) 
+	else if (err == -ETIMEDOUT || o2net_link_down(err, NULL))
 		ret = DLM_NOLOCKMGR;
 	else if (err == -EINVAL)
 		ret = DLM_BADPARAM;
@@ -1084,13 +1137,14 @@
 }
 
 
-static inline void dlm_node_iter_init(unsigned long *map, dlm_node_iter *iter)
+static inline void dlm_node_iter_init(unsigned long *map,
+				      struct dlm_node_iter *iter)
 {
 	memcpy(iter->node_map, map, sizeof(iter->node_map));
 	iter->curnode = -1;
 }
 
-static inline int dlm_node_iter_next(dlm_node_iter *iter)
+static inline int dlm_node_iter_next(struct dlm_node_iter *iter)
 {
 	int bit;
 	bit = find_next_bit(iter->node_map, O2NM_MAX_NODES, iter->curnode+1);

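One thing the sizing comment in dlmcommon.h leaves implicit is that its
arithmetic depends on two sizeof values staying put.  A hypothetical
compile-time guard for that header, assuming the byte counts the
comment quotes and that BUILD_BUG_ON() is available:

#include <linux/kernel.h>

/* from the comment: (240 * 16) + 112 + 128 = 4080 = NET_MAX_PAYLOAD_BYTES */
static inline void dlm_check_migratable_sizes(void)
{
	/* BUILD_BUG_ON() breaks the compile when its condition is true */
	BUILD_BUG_ON(sizeof(struct dlm_migratable_lock) != 16);
	BUILD_BUG_ON(sizeof(struct dlm_migratable_lockres) != 112);
}
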
Modified: trunk/fs/ocfs2/dlm/dlmconvert.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmconvert.c	2005-06-29 22:47:38 UTC (rev 2447)
+++ trunk/fs/ocfs2/dlm/dlmconvert.c	2005-06-30 00:30:29 UTC (rev 2448)
@@ -55,17 +55,19 @@
 /* NOTE: __dlmconvert_master is the only function in here that
  * needs a spinlock held on entry (res->spinlock) and it is the
  * only one that holds a lock on exit (res->spinlock).
- * All other functions in here need no locks and drop all of 
+ * All other functions in here need no locks and drop all of
  * the locks that they acquire. */
-static dlm_status __dlmconvert_master(dlm_ctxt *dlm, dlm_lock_resource *res, 
-			       dlm_lock *lock, int flags, int type,
-			       int *call_ast, int *kick_thread);
-static dlm_status dlm_send_remote_convert_request(dlm_ctxt *dlm, 
-					   dlm_lock_resource *res, 
-					   dlm_lock *lock, int flags, int type);
+static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm,
+					   struct dlm_lock_resource *res,
+					   struct dlm_lock *lock, int flags,
+					   int type, int *call_ast,
+					   int *kick_thread);
+static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
+					   struct dlm_lock_resource *res,
+					   struct dlm_lock *lock, int flags, int type);
 
-/* 
- * this is only called directly by dlmlock(), and only when the 
+/*
+ * this is only called directly by dlmlock(), and only when the
  * local node is the owner of the lockres
  * locking:
  *   caller needs:  none
@@ -73,11 +75,12 @@
  *   held on exit:  none
  * returns: see __dlmconvert_master
  */
-dlm_status dlmconvert_master(dlm_ctxt *dlm, dlm_lock_resource *res, 
-			     dlm_lock *lock, int flags, int type)
+enum dlm_status dlmconvert_master(struct dlm_ctxt *dlm,
+				  struct dlm_lock_resource *res,
+				  struct dlm_lock *lock, int flags, int type)
 {
 	int call_ast = 0, kick_thread = 0;
-	dlm_status status;
+	enum dlm_status status;
 
 	spin_lock(&res->spinlock);
 	/* we are not in a network handler, this is fine */
@@ -85,7 +88,7 @@
 	__dlm_lockres_reserve_ast(res);
 	res->state |= DLM_LOCK_RES_IN_PROGRESS;
 
-	status = __dlmconvert_master(dlm, res, lock, flags, type, 
+	status = __dlmconvert_master(dlm, res, lock, flags, type,
 				     &call_ast, &kick_thread);
 
 	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
@@ -97,7 +100,7 @@
 		dlm_queue_ast(dlm, lock);
 	else
 		dlm_lockres_release_ast(res);
-	
+
 	if (kick_thread)
 		dlm_kick_thread(dlm, res);
 
@@ -113,13 +116,15 @@
  *   call_ast: whether ast should be called for this lock
  *   kick_thread: whether dlm_kick_thread should be called
  */
-static dlm_status __dlmconvert_master(dlm_ctxt *dlm, dlm_lock_resource *res, 
-			       dlm_lock *lock, int flags, int type,
-			       int *call_ast, int *kick_thread)
+static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm,
+					   struct dlm_lock_resource *res,
+					   struct dlm_lock *lock, int flags,
+					   int type, int *call_ast,
+					   int *kick_thread)
 {
-	dlm_status status = DLM_NORMAL;
+	enum dlm_status status = DLM_NORMAL;
 	struct list_head *iter;
-	dlm_lock *tmplock=NULL;
+	struct dlm_lock *tmplock=NULL;
 
 	assert_spin_locked(&res->spinlock);
 
@@ -149,7 +154,7 @@
 			case LKM_EXMODE:
 				/* EX + LKM_VALBLK + convert == set lvb */
 				mlog(0, "will set lvb: converting %s->%s\n",
-				     dlm_lock_mode_name(lock->ml.type), 
+				     dlm_lock_mode_name(lock->ml.type),
 				     dlm_lock_mode_name(type));
 				lock->lksb->flags |= DLM_LKSB_PUT_LVB;
 				break;
@@ -173,7 +178,7 @@
 		}
 	}
 
-	
+
 	/* in-place downconvert? */
 	if (type <= lock->ml.type)
 		goto grant;
@@ -181,7 +186,7 @@
 	/* upconvert from here on */
 	status = DLM_NORMAL;
 	list_for_each(iter, &res->granted) {
-		tmplock = list_entry(iter, dlm_lock, list);
+		tmplock = list_entry(iter, struct dlm_lock, list);
 		if (tmplock == lock)
 			continue;
 		if (!dlm_lock_compatible(tmplock->ml.type, type))
@@ -189,7 +194,7 @@
 	}
 
 	list_for_each(iter, &res->converting) {
-		tmplock = list_entry(iter, dlm_lock, list);
+		tmplock = list_entry(iter, struct dlm_lock, list);
 		if (!dlm_lock_compatible(tmplock->ml.type, type))
 			goto switch_queues;
 		/* existing conversion requests take precedence */
@@ -227,14 +232,15 @@
 	list_del_init(&lock->list);
 	list_add_tail(&lock->list, &res->converting);
 
-unlock_exit:	
+unlock_exit:
 	spin_unlock(&lock->spinlock);
 	if (status == DLM_NORMAL)
 		*kick_thread = 1;
 	return status;
 }
 
-void dlm_revert_pending_convert(dlm_lock_resource *res, dlm_lock *lock)
+void dlm_revert_pending_convert(struct dlm_lock_resource *res,
+				struct dlm_lock *lock)
 {
 	/* do not alter lock refcount.  switching lists. */
 	list_del_init(&lock->list);
@@ -250,12 +256,13 @@
  *   held on exit:  none
  * returns: DLM_NORMAL, DLM_RECOVERING, status from remote node
  */
-dlm_status dlmconvert_remote(dlm_ctxt *dlm, dlm_lock_resource *res, 
-			     dlm_lock *lock, int flags, int type)
+enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
+				  struct dlm_lock_resource *res,
+				  struct dlm_lock *lock, int flags, int type)
 {
-	dlm_status status;
-	
-	mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type, 
+	enum dlm_status status;
+
+	mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
 	     lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
 
 	spin_lock(&res->spinlock);
@@ -304,7 +311,7 @@
 	/* no locks held here.
 	 * need to wait for a reply as to whether it got queued or not. */
 	status = dlm_send_remote_convert_request(dlm, res, lock, flags, type);
-	
+
 	spin_lock(&res->spinlock);
 denied:
 	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
@@ -329,30 +336,30 @@
  *   held on exit:  none
  * returns: DLM_NOLOCKMGR, status from remote node
  */
-static dlm_status dlm_send_remote_convert_request(dlm_ctxt *dlm, 
-					   dlm_lock_resource *res, 
-					   dlm_lock *lock, int flags, int type)
+static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
+					   struct dlm_lock_resource *res,
+					   struct dlm_lock *lock, int flags, int type)
 {
-	dlm_convert_lock convert;
+	struct dlm_convert_lock convert;
 	int tmpret;
-	dlm_status ret;
+	enum dlm_status ret;
 	int status = 0;
 	struct iovec iov[2];
 	size_t iovlen = 1;
 
 	mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
 
-	memset(&convert, 0, sizeof(dlm_convert_lock));
+	memset(&convert, 0, sizeof(struct dlm_convert_lock));
 	convert.node_idx = dlm->node_num;
 	convert.requested_type = type;
 	convert.cookie = lock->ml.cookie;
 	convert.namelen = res->lockname.len;
 	convert.flags = flags;
 	memcpy(convert.name, res->lockname.name, convert.namelen);
-	
-	iov[0].iov_len = sizeof(dlm_convert_lock);
+
+	iov[0].iov_len = sizeof(struct dlm_convert_lock);
 	iov[0].iov_base = &convert;
-	
+
 	if (flags & LKM_PUT_LVB) {
 		/* extra data to send if we are updating lvb */
 		iov[1].iov_len = DLM_LVB_LEN;
@@ -361,7 +368,7 @@
 	}
 
 	dlm_convert_lock_to_net(&convert);
-	tmpret = o2net_send_message_iov(DLM_CONVERT_LOCK_MSG, dlm->key, 
+	tmpret = o2net_send_message_iov(DLM_CONVERT_LOCK_MSG, dlm->key,
 					iov, iovlen, res->owner, &status);
 	if (tmpret >= 0) {
 		// successfully sent and received
@@ -396,25 +403,25 @@
  *   caller needs:  none
  *   taken:         takes and drop res->spinlock
  *   held on exit:  none
- * returns: DLM_NORMAL, DLM_IVLOCKID, DLM_BADARGS, 
+ * returns: DLM_NORMAL, DLM_IVLOCKID, DLM_BADARGS,
  *          status from __dlmconvert_master
  */
 int dlm_convert_lock_handler(o2net_msg *msg, u32 len, void *data)
 {
-	dlm_ctxt *dlm = data;
-	dlm_convert_lock *cnv = (dlm_convert_lock *)msg->buf;
-	dlm_lock_resource *res = NULL;
+	struct dlm_ctxt *dlm = data;
+	struct dlm_convert_lock *cnv = (struct dlm_convert_lock *)msg->buf;
+	struct dlm_lock_resource *res = NULL;
 	struct list_head *iter;
-	dlm_lock *lock = NULL;
-	dlm_lockstatus *lksb;
-	dlm_status status = DLM_NORMAL;
+	struct dlm_lock *lock = NULL;
+	struct dlm_lockstatus *lksb;
+	enum dlm_status status = DLM_NORMAL;
 	u32 flags;
 	int call_ast = 0, kick_thread = 0;
 
 	if (!dlm_grab(dlm))
 		return DLM_REJECTED;
 
-	mlog_bug_on_msg(!dlm_domain_fully_joined(dlm), 
+	mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
 			"Domain %s not fully joined!\n", dlm->name);
 
 	dlm_convert_lock_to_host(cnv);
@@ -433,7 +440,7 @@
 		goto leave;
 	}
 
-	mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : 
+	mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" :
 	     (flags & LKM_GET_LVB ? "get lvb" : "none"));
 
 	status = DLM_IVLOCKID;
@@ -443,7 +450,7 @@
 
 	spin_lock(&res->spinlock);
 	list_for_each(iter, &res->granted) {
-		lock = list_entry(iter, dlm_lock, list);
+		lock = list_entry(iter, struct dlm_lock, list);
 		if (lock->ml.cookie == cnv->cookie &&
 		    lock->ml.node == cnv->node_idx) {
 			dlm_lock_get(lock);
@@ -473,7 +480,7 @@
 	if (status == DLM_NORMAL) {
 		__dlm_lockres_reserve_ast(res);
 		res->state |= DLM_LOCK_RES_IN_PROGRESS;
-		status = __dlmconvert_master(dlm, res, lock, flags, 
+		status = __dlmconvert_master(dlm, res, lock, flags,
 					     cnv->requested_type,
 					     &call_ast, &kick_thread);
 		res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
@@ -495,7 +502,7 @@
 	/* either queue the ast or release it */
 	if (call_ast)
 		dlm_queue_ast(dlm, lock);
-	else 
+	else
 		dlm_lockres_release_ast(res);
 
 	if (kick_thread)

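The pattern behind all of these hunks, in miniature (a sketch of the
convention, not code from the tree): kernel style spells out struct tags
rather than hiding them behind typedefs, and a bare tag can still be
forward-declared wherever only pointers are passed, so headers stay
light.

/* before: two identifiers for one type */
struct _example_ctxt;
typedef struct _example_ctxt example_ctxt;

/* after: one name; an incomplete type is fine for pointer params */
struct example_ctxt;
int example_takes_a_domain(struct example_ctxt *ctxt);	/* hypothetical */
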
Modified: trunk/fs/ocfs2/dlm/dlmconvert.h
===================================================================
--- trunk/fs/ocfs2/dlm/dlmconvert.h	2005-06-29 22:47:38 UTC (rev 2447)
+++ trunk/fs/ocfs2/dlm/dlmconvert.h	2005-06-30 00:30:29 UTC (rev 2448)
@@ -25,9 +25,11 @@
 #ifndef DLMCONVERT_H
 #define DLMCONVERT_H
 
-dlm_status dlmconvert_master(dlm_ctxt *dlm, dlm_lock_resource *res, 
-			     dlm_lock *lock, int flags, int type);
-dlm_status dlmconvert_remote(dlm_ctxt *dlm, dlm_lock_resource *res, 
-			     dlm_lock *lock, int flags, int type);
+enum dlm_status dlmconvert_master(struct dlm_ctxt *dlm,
+				  struct dlm_lock_resource *res,
+				  struct dlm_lock *lock, int flags, int type);
+enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
+				  struct dlm_lock_resource *res,
+				  struct dlm_lock *lock, int flags, int type);
 
 #endif

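Both dlmconvert.c paths above defer to dlm_lock_compatible() when
deciding whether to grant in place or switch queues. Its real body lives
in dlmcommon.h, outside this diff; the rule it encodes should look
roughly like the standard three-mode matrix (sketch, with the usual
NL/PR/EX semantics assumed):

/* 1 if a lock held at 'existing' can coexist with a request for
 * 'request'; illustrative reimplementation only */
static int example_lock_compatible(int existing, int request)
{
	/* NL coexists with everything */
	if (existing == LKM_NLMODE || request == LKM_NLMODE)
		return 1;
	/* two readers are fine */
	if (existing == LKM_PRMODE && request == LKM_PRMODE)
		return 1;
	/* anything involving EX conflicts */
	return 0;
}
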
Modified: trunk/fs/ocfs2/dlm/dlmdebug.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmdebug.c	2005-06-29 22:47:38 UTC (rev 2447)
+++ trunk/fs/ocfs2/dlm/dlmdebug.c	2005-06-30 00:30:29 UTC (rev 2448)
@@ -46,35 +46,35 @@
 #define MLOG_MASK_PREFIX ML_DLM
 #include "cluster/masklog.h"
 
-static int dlm_dump_all_lock_resources(const char __user *data, 
+static int dlm_dump_all_lock_resources(const char __user *data,
 					unsigned int len);
-static void dlm_dump_purge_list(dlm_ctxt *dlm);
+static void dlm_dump_purge_list(struct dlm_ctxt *dlm);
 static int dlm_dump_all_purge_lists(const char __user *data, unsigned int len);
 static int dlm_trigger_migration(const char __user *data, unsigned int len);
-static int dlm_dump_one_lock_resource(const char __user *data, 
+static int dlm_dump_one_lock_resource(const char __user *data,
 				       unsigned int len);
 
 static int dlm_parse_domain_and_lockres(char *buf, unsigned int len,
-					dlm_ctxt **dlm,
-					dlm_lock_resource **res);
+					struct dlm_ctxt **dlm,
+					struct dlm_lock_resource **res);
 
 typedef int (dlm_debug_func_t)(const char __user *data, unsigned int len);
 
-typedef struct _dlm_debug_funcs
+struct dlm_debug_funcs
 {
 	char key;
 	dlm_debug_func_t *func;
-} dlm_debug_funcs;
+};
 
-static dlm_debug_funcs dlm_debug_map[] = {
+static struct dlm_debug_funcs dlm_debug_map[] = {
 	{ 'r', dlm_dump_all_lock_resources },
 	{ 'R', dlm_dump_one_lock_resource },
 	{ 'm', dlm_dump_all_mles },
 	{ 'p', dlm_dump_all_purge_lists  },
 	{ 'M', dlm_trigger_migration },
 };
-static int dlm_debug_map_sz = (sizeof(dlm_debug_map) / 
-			       sizeof(dlm_debug_funcs));
+static int dlm_debug_map_sz = (sizeof(dlm_debug_map) /
+			       sizeof(struct dlm_debug_funcs));
 
 static ssize_t write_dlm_debug(struct file *file, const char __user *buf,
 			       size_t count, loff_t *ppos)
@@ -96,7 +96,7 @@
 
 	ret = count;
 	for (i=0; i < dlm_debug_map_sz; i++) {
-		dlm_debug_funcs *d = &dlm_debug_map[i];
+		struct dlm_debug_funcs *d = &dlm_debug_map[i];
 		if (c == d->key) {
 			fn = d->func;
 			if (fn)
@@ -121,30 +121,30 @@
 }
 
 /* lock resource printing is usually very important (printed
- * right before a BUG in some cases), but we'd like to be 
+ * right before a BUG in some cases), but we'd like to be
  * able to shut it off if needed, hence the KERN_NOTICE level */
-static int dlm_dump_all_lock_resources(const char __user *data, 
+static int dlm_dump_all_lock_resources(const char __user *data,
 				       unsigned int len)
 {
-	dlm_ctxt *dlm;
+	struct dlm_ctxt *dlm;
 	struct list_head *iter;
 
-	mlog(ML_NOTICE, "dumping ALL dlm state for node %s\n", 
+	mlog(ML_NOTICE, "dumping ALL dlm state for node %s\n",
 		  system_utsname.nodename);
 	spin_lock(&dlm_domain_lock);
 	list_for_each(iter, &dlm_domains) {
-		dlm = list_entry (iter, dlm_ctxt, list);
+		dlm = list_entry (iter, struct dlm_ctxt, list);
 		dlm_dump_lock_resources(dlm);
 	}
 	spin_unlock(&dlm_domain_lock);
 	return len;
 }
 
-static int dlm_dump_one_lock_resource(const char __user *data, 
+static int dlm_dump_one_lock_resource(const char __user *data,
 				       unsigned int len)
 {
-	dlm_ctxt *dlm;
-	dlm_lock_resource *res;
+	struct dlm_ctxt *dlm;
+	struct dlm_lock_resource *res;
 	char *buf = NULL;
 	int ret = -EINVAL;
 	int tmpret;
@@ -181,9 +181,9 @@
 		goto leave;
 	}
 
-	mlog(ML_NOTICE, "dlm_ctxt: %s, node=%u, key=%u\n",
+	mlog(ML_NOTICE, "struct dlm_ctxt: %s, node=%u, key=%u\n",
 		dlm->name, dlm->node_num, dlm->key);
-	
+
 	dlm_print_one_lock_resource(res);
 	dlm_lockres_put(res);
 	dlm_put(dlm);
@@ -196,101 +196,101 @@
 }
 
 
-void dlm_print_one_lock_resource(dlm_lock_resource *res)
+void dlm_print_one_lock_resource(struct dlm_lock_resource *res)
 {
-	mlog(ML_NOTICE, "lockres: %.*s, owner=%u, state=%u\n", 
-	       res->lockname.len, res->lockname.name, 
+	mlog(ML_NOTICE, "lockres: %.*s, owner=%u, state=%u\n",
+	       res->lockname.len, res->lockname.name,
 	       res->owner, res->state);
 	spin_lock(&res->spinlock);
 	__dlm_print_one_lock_resource(res);
 	spin_unlock(&res->spinlock);
 }
 
-void __dlm_print_one_lock_resource(dlm_lock_resource *res)
+void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
 {
 	struct list_head *iter2;
-	dlm_lock *lock;
+	struct dlm_lock *lock;
 
 	assert_spin_locked(&res->spinlock);
 
-	mlog(ML_NOTICE, "lockres: %.*s, owner=%u, state=%u\n", 
-	       res->lockname.len, res->lockname.name, 
+	mlog(ML_NOTICE, "lockres: %.*s, owner=%u, state=%u\n",
+	       res->lockname.len, res->lockname.name,
 	       res->owner, res->state);
 	mlog(ML_NOTICE, "  granted queue: \n");
 	list_for_each(iter2, &res->granted) {
-		lock = list_entry(iter2, dlm_lock, list);
+		lock = list_entry(iter2, struct dlm_lock, list);
 		spin_lock(&lock->spinlock);
-		mlog(ML_NOTICE, "    type=%d, conv=%d, node=%u, " 
-		       "cookie=%"MLFu64"\n", lock->ml.type, 
-		       lock->ml.convert_type, lock->ml.node, 
+		mlog(ML_NOTICE, "    type=%d, conv=%d, node=%u, "
+		       "cookie=%"MLFu64"\n", lock->ml.type,
+		       lock->ml.convert_type, lock->ml.node,
 		       lock->ml.cookie);
 		spin_unlock(&lock->spinlock);
 	}
 	mlog(ML_NOTICE, "  converting queue: \n");
 	list_for_each(iter2, &res->converting) {
-		lock = list_entry(iter2, dlm_lock, list);
+		lock = list_entry(iter2, struct dlm_lock, list);
 		spin_lock(&lock->spinlock);
-		mlog(ML_NOTICE, "    type=%d, conv=%d, node=%u, " 
-		       "cookie=%"MLFu64"\n", lock->ml.type, 
-		       lock->ml.convert_type, lock->ml.node, 
+		mlog(ML_NOTICE, "    type=%d, conv=%d, node=%u, "
+		       "cookie=%"MLFu64"\n", lock->ml.type,
+		       lock->ml.convert_type, lock->ml.node,
 		       lock->ml.cookie);
 		spin_unlock(&lock->spinlock);
 	}
 	mlog(ML_NOTICE, "  blocked queue: \n");
 	list_for_each(iter2, &res->blocked) {
-		lock = list_entry(iter2, dlm_lock, list);
+		lock = list_entry(iter2, struct dlm_lock, list);
 		spin_lock(&lock->spinlock);
-		mlog(ML_NOTICE, "    type=%d, conv=%d, node=%u, " 
-		       "cookie=%"MLFu64"\n", lock->ml.type, 
-		       lock->ml.convert_type, lock->ml.node, 
+		mlog(ML_NOTICE, "    type=%d, conv=%d, node=%u, "
+		       "cookie=%"MLFu64"\n", lock->ml.type,
+		       lock->ml.convert_type, lock->ml.node,
 		       lock->ml.cookie);
 		spin_unlock(&lock->spinlock);
 	}
 }
 
 
-void dlm_print_one_lock(dlm_lock *lockid)
+void dlm_print_one_lock(struct dlm_lock *lockid)
 {
 	dlm_print_one_lock_resource(lockid->lockres);
 }
 EXPORT_SYMBOL_GPL(dlm_print_one_lock);
 
-void dlm_dump_lock_resources(dlm_ctxt *dlm)
+void dlm_dump_lock_resources(struct dlm_ctxt *dlm)
 {
-	dlm_lock_resource *res;
+	struct dlm_lock_resource *res;
 	struct list_head *iter;
 	struct list_head *bucket;
 	int i;
 
-	mlog(ML_NOTICE, "dlm_ctxt: %s, node=%u, key=%u\n", 
+	mlog(ML_NOTICE, "struct dlm_ctxt: %s, node=%u, key=%u\n",
 		  dlm->name, dlm->node_num, dlm->key);
 	if (!dlm || !dlm->name) {
 		mlog(ML_ERROR, "dlm=%p\n", dlm);
 		return;
 	}
-		
+
 	spin_lock(&dlm->spinlock);
 	for (i=0; i<DLM_HASH_SIZE; i++) {
 		bucket = &(dlm->resources[i]);
 		list_for_each(iter, bucket) {
-			res = list_entry(iter, dlm_lock_resource, list);
+			res = list_entry(iter, struct dlm_lock_resource, list);
 			dlm_print_one_lock_resource(res);
 		}
 	}
 	spin_unlock(&dlm->spinlock);
 }
 
-static void dlm_dump_purge_list(dlm_ctxt *dlm)
+static void dlm_dump_purge_list(struct dlm_ctxt *dlm)
 {
 	struct list_head *iter;
-	dlm_lock_resource *lockres;
+	struct dlm_lock_resource *lockres;
 
 	mlog(ML_NOTICE, "Purge list for DLM Domain \"%s\"\n", dlm->name);
 	mlog(ML_NOTICE, "Last_used\tName\n");
 
 	spin_lock(&dlm->spinlock);
 	list_for_each(iter, &dlm->purge_list) {
-		lockres = list_entry(iter, dlm_lock_resource, purge);
+		lockres = list_entry(iter, struct dlm_lock_resource, purge);
 
 		spin_lock(&lockres->spinlock);
 		mlog(ML_NOTICE, "%lu\t%.*s\n", lockres->last_used,
@@ -302,12 +302,12 @@
 
 static int dlm_dump_all_purge_lists(const char __user *data, unsigned int len)
 {
-	dlm_ctxt *dlm;
+	struct dlm_ctxt *dlm;
 	struct list_head *iter;
 
 	spin_lock(&dlm_domain_lock);
 	list_for_each(iter, &dlm_domains) {
-		dlm = list_entry (iter, dlm_ctxt, list);
+		dlm = list_entry (iter, struct dlm_ctxt, list);
 		dlm_dump_purge_list(dlm);
 	}
 	spin_unlock(&dlm_domain_lock);
@@ -315,8 +315,8 @@
 }
 
 static int dlm_parse_domain_and_lockres(char *buf, unsigned int len,
-					dlm_ctxt **dlm,
-					dlm_lock_resource **res)
+					struct dlm_ctxt **dlm,
+					struct dlm_lock_resource **res)
 {
 	char *resname;
 	char *domainname;
@@ -334,7 +334,7 @@
 	}
 	tmp++;
 	domainname = tmp;
-	
+
 	while (*tmp) {
 		if (*tmp == ' ')
 			break;
@@ -345,7 +345,7 @@
 		goto leave;
 	}
 
-	*tmp = '\0';  // null term the domainname	
+	*tmp = '\0';  // null term the domainname
 	tmp++;
 	resname = tmp;
 	while (*tmp) {
@@ -387,8 +387,8 @@
 
 static int dlm_trigger_migration(const char __user *data, unsigned int len)
 {
-	dlm_lock_resource *res;
-	dlm_ctxt *dlm;
+	struct dlm_lock_resource *res;
+	struct dlm_ctxt *dlm;
 	char *buf = NULL;
 	int ret = -EINVAL;
 	int tmpret;
@@ -427,7 +427,7 @@
 	tmpret = dlm_migrate_lockres(dlm, res, O2NM_MAX_NODES);
 	mlog(0, "dlm_migrate_lockres returned %d\n", tmpret);
 	if (tmpret < 0)
-		mlog(ML_ERROR, "failed to migrate %.*s: %d\n", 
+		mlog(ML_ERROR, "failed to migrate %.*s: %d\n",
 		     res->lockname.len, res->lockname.name, tmpret);
 	dlm_lockres_put(res);
 	dlm_put(dlm);
@@ -530,7 +530,7 @@
 };
 
 
-const char *dlm_errmsg(dlm_status err)
+const char *dlm_errmsg(enum dlm_status err)
 {
 	if (err >= DLM_MAXSTATS || err < 0)
 		return dlm_errmsgs[DLM_MAXSTATS];
@@ -538,7 +538,7 @@
 }
 EXPORT_SYMBOL_GPL(dlm_errmsg);
 
-const char *dlm_errname(dlm_status err)
+const char *dlm_errname(enum dlm_status err)
 {
 	if (err >= DLM_MAXSTATS || err < 0)
 		return dlm_errnames[DLM_MAXSTATS];

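One pre-existing wart worth noting in dlm_dump_lock_resources() above
(the ordering predates this commit, which only touches the format
string): the ML_NOTICE mlog() dereferences dlm before the !dlm guard
runs, so that guard can never trip on a NULL pointer. Hoisted, the top
of the function would read (sketch only):

void dlm_dump_lock_resources(struct dlm_ctxt *dlm)
{
	/* guard before any dereference */
	if (!dlm || !dlm->name) {
		mlog(ML_ERROR, "dlm=%p\n", dlm);
		return;
	}
	mlog(ML_NOTICE, "struct dlm_ctxt: %s, node=%u, key=%u\n",
	     dlm->name, dlm->node_num, dlm->key);
	/* ... hash bucket walk as above ... */
}
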
Modified: trunk/fs/ocfs2/dlm/dlmdebug.h
===================================================================
--- trunk/fs/ocfs2/dlm/dlmdebug.h	2005-06-29 22:47:38 UTC (rev 2447)
+++ trunk/fs/ocfs2/dlm/dlmdebug.h	2005-06-30 00:30:29 UTC (rev 2448)
@@ -26,6 +26,6 @@
 #define DLMDEBUG_H
 
 void dlm_create_dlm_debug_proc_entry(void);
-void dlm_dump_lock_resources(dlm_ctxt *dlm);
+void dlm_dump_lock_resources(struct dlm_ctxt *dlm);
 
 #endif

Modified: trunk/fs/ocfs2/dlm/dlmdomain.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmdomain.c	2005-06-29 22:47:38 UTC (rev 2447)
+++ trunk/fs/ocfs2/dlm/dlmdomain.c	2005-06-30 00:30:29 UTC (rev 2448)
@@ -51,11 +51,11 @@
 /*
  *
  * spinlock lock ordering: if multiple locks are needed, obey this ordering:
- *    dlm_domain_lock 
- *    dlm_ctxt->spinlock
- *    dlm_lock_resource->spinlock
- *    dlm_ctxt->master_lock
- *    dlm_ctxt->ast_lock
+ *    dlm_domain_lock
+ *    struct dlm_ctxt->spinlock
+ *    struct dlm_lock_resource->spinlock
+ *    struct dlm_ctxt->master_lock
+ *    struct dlm_ctxt->ast_lock
  *    dlm_master_list_entry->spinlock
  *    dlm_lock->spinlock
  *
@@ -72,16 +72,16 @@
 static int dlm_cancel_join_handler(o2net_msg *msg, u32 len, void *data);
 static int dlm_exit_domain_handler(o2net_msg *msg, u32 len, void *data);
 
-static void dlm_unregister_domain_handlers(dlm_ctxt *dlm);
+static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm);
 
-void __dlm_unhash_lockres(dlm_lock_resource *lockres)
+void __dlm_unhash_lockres(struct dlm_lock_resource *lockres)
 {
 	list_del_init(&lockres->list);
 	dlm_lockres_put(lockres);
 }
 
-void __dlm_insert_lockres(dlm_ctxt *dlm,
-		       dlm_lock_resource *res)
+void __dlm_insert_lockres(struct dlm_ctxt *dlm,
+		       struct dlm_lock_resource *res)
 {
 	struct list_head *bucket;
 	struct qstr *q;
@@ -98,13 +98,13 @@
 	list_add_tail(&res->list, bucket);
 }
 
-dlm_lock_resource * __dlm_lookup_lockres(dlm_ctxt *dlm,
+struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
 					 const char *name,
 					 unsigned int len)
 {
 	unsigned int hash;
 	struct list_head *iter;
-	dlm_lock_resource *tmpres=NULL;
+	struct dlm_lock_resource *tmpres=NULL;
 	struct list_head *bucket;
 
 	mlog_entry("%.*s\n", len, name);
@@ -117,7 +117,7 @@
 
 	/* check for pre-existing lock */
 	list_for_each(iter, bucket) {
-		tmpres = list_entry(iter, dlm_lock_resource, list);
+		tmpres = list_entry(iter, struct dlm_lock_resource, list);
 		if (tmpres->lockname.len == len &&
 		    memcmp(tmpres->lockname.name, name, len) == 0) {
 			dlm_lockres_get(tmpres);
@@ -129,11 +129,11 @@
 	return tmpres;
 }
 
-dlm_lock_resource * dlm_lookup_lockres(dlm_ctxt *dlm, 
+struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
 				    const char *name,
 				    unsigned int len)
 {
-	dlm_lock_resource *res;
+	struct dlm_lock_resource *res;
 
 	spin_lock(&dlm->spinlock);
 	res = __dlm_lookup_lockres(dlm, name, len);
@@ -141,9 +141,9 @@
 	return res;
 }
 
-static dlm_ctxt * __dlm_lookup_domain_full(const char *domain, int len)
+static struct dlm_ctxt * __dlm_lookup_domain_full(const char *domain, int len)
 {
-	dlm_ctxt *tmp = NULL;
+	struct dlm_ctxt *tmp = NULL;
 	struct list_head *iter;
 
 	assert_spin_locked(&dlm_domain_lock);
@@ -151,7 +151,7 @@
 	/* tmp->name here is always NULL terminated,
 	 * but domain may not be! */
 	list_for_each(iter, &dlm_domains) {
-		tmp = list_entry (iter, dlm_ctxt, list);
+		tmp = list_entry (iter, struct dlm_ctxt, list);
 		if (strlen(tmp->name) == len &&
 		    memcmp(tmp->name, domain, len)==0)
 			break;
@@ -162,7 +162,7 @@
 }
 
 /* For null terminated domain strings ONLY */
-dlm_ctxt * __dlm_lookup_domain(const char *domain)
+struct dlm_ctxt * __dlm_lookup_domain(const char *domain)
 {
 	assert_spin_locked(&dlm_domain_lock);
 
@@ -176,7 +176,7 @@
 static int dlm_wait_on_domain_helper(const char *domain)
 {
 	int ret = 0;
-	dlm_ctxt *tmp = NULL;
+	struct dlm_ctxt *tmp = NULL;
 
 	spin_lock(&dlm_domain_lock);
 
@@ -190,7 +190,7 @@
 	return ret;
 }
 
-static void dlm_free_ctxt_mem(dlm_ctxt *dlm)
+static void dlm_free_ctxt_mem(struct dlm_ctxt *dlm)
 {
 	if (dlm->resources)
 		free_page((unsigned long) dlm->resources);
@@ -206,9 +206,9 @@
  * will however drop and reacquire it multiple times */
 static void dlm_ctxt_release(struct kref *kref)
 {
-	dlm_ctxt *dlm;
+	struct dlm_ctxt *dlm;
 
-	dlm = container_of(kref, dlm_ctxt, dlm_refs);
+	dlm = container_of(kref, struct dlm_ctxt, dlm_refs);
 
 	BUG_ON(dlm->num_joins);
 	BUG_ON(dlm->dlm_state == DLM_CTXT_JOINED);
@@ -227,14 +227,14 @@
 	spin_lock(&dlm_domain_lock);
 }
 
-void dlm_put(dlm_ctxt *dlm)
+void dlm_put(struct dlm_ctxt *dlm)
 {
 	spin_lock(&dlm_domain_lock);
 	kref_put(&dlm->dlm_refs, dlm_ctxt_release);
 	spin_unlock(&dlm_domain_lock);
 }
 
-static void __dlm_get(dlm_ctxt *dlm)
+static void __dlm_get(struct dlm_ctxt *dlm)
 {
 	kref_get(&dlm->dlm_refs);
 }
@@ -242,15 +242,15 @@
 /* given a questionable reference to a dlm object, gets a reference if
  * it can find it in the list, otherwise returns NULL in which case
  * you shouldn't trust your pointer. */
-dlm_ctxt *dlm_grab(dlm_ctxt *dlm)
+struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm)
 {
 	struct list_head *iter;
-	dlm_ctxt *target = NULL;
+	struct dlm_ctxt *target = NULL;
 
 	spin_lock(&dlm_domain_lock);
 
 	list_for_each(iter, &dlm_domains) {
-		target = list_entry (iter, dlm_ctxt, list);
+		target = list_entry (iter, struct dlm_ctxt, list);
 
 		if (target == dlm) {
 			__dlm_get(target);
@@ -265,14 +265,14 @@
 	return target;
 }
 
-void dlm_get(dlm_ctxt *dlm)
+void dlm_get(struct dlm_ctxt *dlm)
 {
 	spin_lock(&dlm_domain_lock);
 	__dlm_get(dlm);
 	spin_unlock(&dlm_domain_lock);
 }
 
-int dlm_domain_fully_joined(dlm_ctxt *dlm)
+int dlm_domain_fully_joined(struct dlm_ctxt *dlm)
 {
 	int ret;
 
@@ -284,7 +284,7 @@
 	return ret;
 }
 
-static void dlm_complete_dlm_shutdown(dlm_ctxt *dlm)
+static void dlm_complete_dlm_shutdown(struct dlm_ctxt *dlm)
 {
 	dlm_unregister_domain_handlers(dlm);
 	dlm_complete_thread(dlm);
@@ -301,24 +301,24 @@
 	wake_up(&dlm_domain_events);
 }
 
-static void dlm_migrate_all_locks(dlm_ctxt *dlm)
+static void dlm_migrate_all_locks(struct dlm_ctxt *dlm)
 {
 	int i;
-	dlm_lock_resource *res;
+	struct dlm_lock_resource *res;
 
 	mlog(0, "Migrating locks from domain %s\n", dlm->name);
 	spin_lock(&dlm->spinlock);
 	for (i=0; i<DLM_HASH_SIZE; i++) {
 		while (!list_empty(&dlm->resources[i])) {
 			res = list_entry(dlm->resources[i].next,
-				     dlm_lock_resource, list);
-			/* this should unhash the lockres 
+				     struct dlm_lock_resource, list);
+			/* this should unhash the lockres
 			 * and exit with dlm->spinlock */
 			mlog(0, "purging res=%p\n", res);
 			if (res->state & DLM_LOCK_RES_DIRTY ||
 			    !list_empty(&res->dirty)) {
 				/* HACK!  this should absolutely go.
-				 * need to figure out why some empty 
+				 * need to figure out why some empty
 				 * lockreses are still marked dirty */
 				mlog(ML_ERROR, "lockres %.*s dirty!\n",
 				     res->lockname.len, res->lockname.name);
@@ -336,7 +336,7 @@
 	mlog(0, "DONE Migrating locks from domain %s\n", dlm->name);
 }
 
-static int dlm_no_joining_node(dlm_ctxt *dlm)
+static int dlm_no_joining_node(struct dlm_ctxt *dlm)
 {
 	int ret;
 
@@ -347,7 +347,7 @@
 	return ret;
 }
 
-static void dlm_mark_domain_leaving(dlm_ctxt *dlm)
+static void dlm_mark_domain_leaving(struct dlm_ctxt *dlm)
 {
 	/* Yikes, a double spinlock! I need domain_lock for the dlm
 	 * state and the dlm spinlock for join state... Sorry! */
@@ -370,7 +370,7 @@
 	spin_unlock(&dlm_domain_lock);
 }
 
-static void __dlm_print_nodes(dlm_ctxt *dlm)
+static void __dlm_print_nodes(struct dlm_ctxt *dlm)
 {
 	int node = -1;
 
@@ -378,7 +378,7 @@
 
 	mlog(ML_NOTICE, "Nodes in my domain (\"%s\"):\n", dlm->name);
 
-	while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 
+	while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
 				     node + 1)) < O2NM_MAX_NODES) {
 		mlog(ML_NOTICE, " node %d\n", node);
 	}
@@ -386,9 +386,9 @@
 
 static int dlm_exit_domain_handler(o2net_msg *msg, u32 len, void *data)
 {
-	dlm_ctxt *dlm = data;
+	struct dlm_ctxt *dlm = data;
 	unsigned int node;
-	dlm_exit_domain *exit_msg = (dlm_exit_domain *) msg->buf;
+	struct dlm_exit_domain *exit_msg = (struct dlm_exit_domain *) msg->buf;
 
 	mlog_entry("%p %u %p", msg, len, data);
 
@@ -411,11 +411,11 @@
 	return 0;
 }
 
-static int dlm_send_one_domain_exit(dlm_ctxt *dlm,
+static int dlm_send_one_domain_exit(struct dlm_ctxt *dlm,
 				    unsigned int node)
 {
 	int status;
-	dlm_exit_domain leave_msg;
+	struct dlm_exit_domain leave_msg;
 
 	mlog(0, "Asking node %u if we can leave the domain %s me = %u\n",
 		  node, dlm->name, dlm->node_num);
@@ -435,7 +435,7 @@
 }
 
 
-static void dlm_leave_domain(dlm_ctxt *dlm)
+static void dlm_leave_domain(struct dlm_ctxt *dlm)
 {
 	int node, clear_node, status;
 
@@ -447,7 +447,7 @@
 	spin_lock(&dlm->spinlock);
 	/* Clear ourselves from the domain map */
 	clear_bit(dlm->node_num, dlm->domain_map);
-	while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 
+	while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
 				     0)) < O2NM_MAX_NODES) {
 		/* Drop the dlm spinlock. This is safe wrt the domain_map.
 		 * -nodes cannot be added now as the
@@ -482,7 +482,7 @@
 	spin_unlock(&dlm->spinlock);
 }
 
-int dlm_joined(dlm_ctxt *dlm)
+int dlm_joined(struct dlm_ctxt *dlm)
 {
 	int ret = 0;
 
@@ -496,7 +496,7 @@
 	return ret;
 }
 
-int dlm_shutting_down(dlm_ctxt *dlm)
+int dlm_shutting_down(struct dlm_ctxt *dlm)
 {
 	int ret = 0;
 
@@ -510,7 +510,7 @@
 	return ret;
 }
 
-void dlm_unregister_domain(dlm_ctxt *dlm)
+void dlm_unregister_domain(struct dlm_ctxt *dlm)
 {
 	int leave = 0;
 
@@ -548,17 +548,17 @@
 
 static int dlm_query_join_handler(o2net_msg *msg, u32 len, void *data)
 {
-	dlm_query_join_request *query;
+	struct dlm_query_join_request *query;
 	enum dlm_query_join_response response;
-	dlm_ctxt *dlm = NULL;
+	struct dlm_ctxt *dlm = NULL;
 
-	query = (dlm_query_join_request *) msg->buf;
+	query = (struct dlm_query_join_request *) msg->buf;
 	dlm_query_join_request_to_host(query);
 
 	mlog(0, "node %u wants to join domain %s\n", query->node_idx,
 		  query->domain);
 
-	/* 
+	/*
 	 * If heartbeat doesn't consider the node live, tell it
 	 * to back off and try again.  This gives heartbeat a chance
 	 * to catch up.
@@ -610,10 +610,10 @@
 
 static int dlm_assert_joined_handler(o2net_msg *msg, u32 len, void *data)
 {
-	dlm_assert_joined *assert;
-	dlm_ctxt *dlm = NULL;
+	struct dlm_assert_joined *assert;
+	struct dlm_ctxt *dlm = NULL;
 
-	assert = (dlm_assert_joined *) msg->buf;
+	assert = (struct dlm_assert_joined *) msg->buf;
 	dlm_assert_joined_to_host(assert);
 
 	mlog(0, "node %u asserts join on domain %s\n", assert->node_idx,
@@ -643,10 +643,10 @@
 
 static int dlm_cancel_join_handler(o2net_msg *msg, u32 len, void *data)
 {
-	dlm_cancel_join *cancel;
-	dlm_ctxt *dlm = NULL;
+	struct dlm_cancel_join *cancel;
+	struct dlm_ctxt *dlm = NULL;
 
-	cancel = (dlm_cancel_join *) msg->buf;
+	cancel = (struct dlm_cancel_join *) msg->buf;
 	dlm_cancel_join_to_host(cancel);
 
 	mlog(0, "node %u cancels join on domain %s\n", cancel->node_idx,
@@ -670,11 +670,11 @@
 	return 0;
 }
 
-static int dlm_send_one_join_cancel(dlm_ctxt *dlm,
+static int dlm_send_one_join_cancel(struct dlm_ctxt *dlm,
 				    unsigned int node)
 {
 	int status;
-	dlm_cancel_join cancel_msg;
+	struct dlm_cancel_join cancel_msg;
 
 	memset(&cancel_msg, 0, sizeof(cancel_msg));
 	cancel_msg.node_idx = dlm->node_num;
@@ -696,7 +696,7 @@
 }
 
 /* map_size should be in bytes. */
-static int dlm_send_join_cancels(dlm_ctxt *dlm,
+static int dlm_send_join_cancels(struct dlm_ctxt *dlm,
 				 unsigned long *node_map,
 				 unsigned int map_size)
 {
@@ -713,7 +713,7 @@
 
 	status = 0;
 	node = -1;
-	while ((node = find_next_bit(node_map, O2NM_MAX_NODES, 
+	while ((node = find_next_bit(node_map, O2NM_MAX_NODES,
 				     node + 1)) < O2NM_MAX_NODES) {
 		if (node == dlm->node_num)
 			continue;
@@ -732,12 +732,12 @@
 	return status;
 }
 
-static int dlm_request_join(dlm_ctxt *dlm,
+static int dlm_request_join(struct dlm_ctxt *dlm,
 			    int node,
 			    enum dlm_query_join_response *response)
 {
 	int status, retval;
-	dlm_query_join_request join_msg;
+	struct dlm_query_join_request join_msg;
 
 	mlog(0, "querying node %d\n", node);
 
@@ -779,11 +779,11 @@
 	return status;
 }
 
-static int dlm_send_one_join_assert(dlm_ctxt *dlm,
+static int dlm_send_one_join_assert(struct dlm_ctxt *dlm,
 				    unsigned int node)
 {
 	int status;
-	dlm_assert_joined assert_msg;
+	struct dlm_assert_joined assert_msg;
 
 	mlog(0, "Sending join assert to node %u\n", node);
 
@@ -803,14 +803,14 @@
 	return status;
 }
 
-static void dlm_send_join_asserts(dlm_ctxt *dlm,
+static void dlm_send_join_asserts(struct dlm_ctxt *dlm,
 				  unsigned long *node_map)
 {
 	int status, node, live;
 
 	status = 0;
 	node = -1;
-	while ((node = find_next_bit(node_map, O2NM_MAX_NODES, 
+	while ((node = find_next_bit(node_map, O2NM_MAX_NODES,
 				     node + 1)) < O2NM_MAX_NODES) {
 		if (node == dlm->node_num)
 			continue;
@@ -842,7 +842,7 @@
 	unsigned long yes_resp_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
 };
 
-static int dlm_should_restart_join(dlm_ctxt *dlm,
+static int dlm_should_restart_join(struct dlm_ctxt *dlm,
 				   struct domain_join_ctxt *ctxt,
 				   enum dlm_query_join_response response)
 {
@@ -866,7 +866,7 @@
 	return ret;
 }
 
-static int dlm_try_to_join_domain(dlm_ctxt *dlm)
+static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
 {
 	int status = 0, tmpstat, node;
 	struct domain_join_ctxt *ctxt;
@@ -894,7 +894,7 @@
 	spin_unlock(&dlm->spinlock);
 
 	node = -1;
-	while ((node = find_next_bit(ctxt->live_map, O2NM_MAX_NODES, 
+	while ((node = find_next_bit(ctxt->live_map, O2NM_MAX_NODES,
 				     node + 1)) < O2NM_MAX_NODES) {
 		if (node == dlm->node_num)
 			continue;
@@ -962,14 +962,14 @@
 	return status;
 }
 
-static void dlm_unregister_domain_handlers(dlm_ctxt *dlm)
+static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm)
 {
 	o2hb_unregister_callback(&dlm->dlm_hb_up);
 	o2hb_unregister_callback(&dlm->dlm_hb_down);
 	o2net_unregister_handler_list(&dlm->dlm_domain_handlers);
 }
 
-static int dlm_register_domain_handlers(dlm_ctxt *dlm)
+static int dlm_register_domain_handlers(struct dlm_ctxt *dlm)
 {
 	int status;
 
@@ -988,41 +988,41 @@
 		goto bail;
 
 	status = o2net_register_handler(DLM_MASTER_REQUEST_MSG, dlm->key,
-					sizeof(dlm_master_request), 
+					sizeof(struct dlm_master_request),
 					dlm_master_request_handler,
 					dlm, &dlm->dlm_domain_handlers);
 	if (status)
 		goto bail;
 
 	status = o2net_register_handler(DLM_ASSERT_MASTER_MSG, dlm->key,
-					sizeof(dlm_assert_master), 
+					sizeof(struct dlm_assert_master),
 					dlm_assert_master_handler,
 					dlm, &dlm->dlm_domain_handlers);
 	if (status)
 		goto bail;
 
 	status = o2net_register_handler(DLM_CREATE_LOCK_MSG, dlm->key,
-					sizeof(dlm_create_lock), 
+					sizeof(struct dlm_create_lock),
 					dlm_create_lock_handler,
 					dlm, &dlm->dlm_domain_handlers);
 	if (status)
 		goto bail;
 
-	status = o2net_register_handler(DLM_CONVERT_LOCK_MSG, dlm->key, 
+	status = o2net_register_handler(DLM_CONVERT_LOCK_MSG, dlm->key,
 					DLM_CONVERT_LOCK_MAX_LEN,
 					dlm_convert_lock_handler,
 					dlm, &dlm->dlm_domain_handlers);
 	if (status)
 		goto bail;
 
-	status = o2net_register_handler(DLM_UNLOCK_LOCK_MSG, dlm->key, 
+	status = o2net_register_handler(DLM_UNLOCK_LOCK_MSG, dlm->key,
 					DLM_UNLOCK_LOCK_MAX_LEN,
 					dlm_unlock_lock_handler,
 					dlm, &dlm->dlm_domain_handlers);
 	if (status)
 		goto bail;
 
-	status = o2net_register_handler(DLM_PROXY_AST_MSG, dlm->key, 
+	status = o2net_register_handler(DLM_PROXY_AST_MSG, dlm->key,
 					DLM_PROXY_AST_MAX_LEN,
 					dlm_proxy_ast_handler,
 					dlm, &dlm->dlm_domain_handlers);
@@ -1030,57 +1030,57 @@
 		goto bail;
 
 	status = o2net_register_handler(DLM_EXIT_DOMAIN_MSG, dlm->key,
-					sizeof(dlm_exit_domain),
+					sizeof(struct dlm_exit_domain),
 					dlm_exit_domain_handler,
 					dlm, &dlm->dlm_domain_handlers);
 	if (status)
 		goto bail;
 
 	status = o2net_register_handler(DLM_MIGRATE_REQUEST_MSG, dlm->key,
-					sizeof(dlm_migrate_request), 
-					dlm_migrate_request_handler, 
+					sizeof(struct dlm_migrate_request),
+					dlm_migrate_request_handler,
 					dlm, &dlm->dlm_domain_handlers);
 	if (status)
 		goto bail;
 
-	status = o2net_register_handler(DLM_MIG_LOCKRES_MSG, dlm->key, 
-					DLM_MIG_LOCKRES_MAX_LEN, 
-					dlm_mig_lockres_handler, 
+	status = o2net_register_handler(DLM_MIG_LOCKRES_MSG, dlm->key,
+					DLM_MIG_LOCKRES_MAX_LEN,
+					dlm_mig_lockres_handler,
 					dlm, &dlm->dlm_domain_handlers);
 	if (status)
 		goto bail;
 
 	status = o2net_register_handler(DLM_MASTER_REQUERY_MSG, dlm->key,
-					sizeof(dlm_master_requery), 
-					dlm_master_requery_handler, 
+					sizeof(struct dlm_master_requery),
+					dlm_master_requery_handler,
 					dlm, &dlm->dlm_domain_handlers);
 	if (status)
 		goto bail;
 
 	status = o2net_register_handler(DLM_LOCK_REQUEST_MSG, dlm->key,
-					sizeof(dlm_lock_request), 
-					dlm_request_all_locks_handler, 
+					sizeof(struct dlm_lock_request),
+					dlm_request_all_locks_handler,
 					dlm, &dlm->dlm_domain_handlers);
 	if (status)
 		goto bail;
 
 	status = o2net_register_handler(DLM_RECO_DATA_DONE_MSG, dlm->key,
-					sizeof(dlm_reco_data_done), 
-					dlm_reco_data_done_handler, 
+					sizeof(struct dlm_reco_data_done),
+					dlm_reco_data_done_handler,
 					dlm, &dlm->dlm_domain_handlers);
 	if (status)
 		goto bail;
 
 	status = o2net_register_handler(DLM_BEGIN_RECO_MSG, dlm->key,
-					sizeof(dlm_begin_reco), 
-					dlm_begin_reco_handler, 
+					sizeof(struct dlm_begin_reco),
+					dlm_begin_reco_handler,
 					dlm, &dlm->dlm_domain_handlers);
 	if (status)
 		goto bail;
 
 	status = o2net_register_handler(DLM_FINALIZE_RECO_MSG, dlm->key,
-					sizeof(dlm_finalize_reco), 
-					dlm_finalize_reco_handler, 
+					sizeof(struct dlm_finalize_reco),
+					dlm_finalize_reco_handler,
 					dlm, &dlm->dlm_domain_handlers);
 	if (status)
 		goto bail;
@@ -1092,7 +1092,7 @@
 	return status;
 }
 
-static int dlm_join_domain(dlm_ctxt *dlm)
+static int dlm_join_domain(struct dlm_ctxt *dlm)
 {
 	int status;
 
@@ -1163,11 +1163,11 @@
 	return status;
 }
 
-static dlm_ctxt *dlm_alloc_ctxt(const char *domain,
+static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
 				u32 key)
 {
 	int i;
-	dlm_ctxt *dlm = NULL;
+	struct dlm_ctxt *dlm = NULL;
 
 	dlm = kcalloc(1, sizeof(*dlm), GFP_KERNEL);
 	if (!dlm) {
@@ -1229,7 +1229,6 @@
 	init_waitqueue_head(&dlm->reco.event);
 	INIT_LIST_HEAD(&dlm->master_list);
 	INIT_LIST_HEAD(&dlm->mle_hb_events);
-	init_rwsem(&dlm->recovery_sem);
 
 	dlm->joining_node = DLM_LOCK_RES_OWNER_UNKNOWN;
 	init_waitqueue_head(&dlm->dlm_join_events);
@@ -1259,12 +1258,12 @@
 /*
  * dlm_register_domain: one-time setup per "domain"
  */
-dlm_ctxt * dlm_register_domain(const char *domain,
+struct dlm_ctxt * dlm_register_domain(const char *domain,
 			       u32 key)
 {
 	int ret;
-	dlm_ctxt *dlm = NULL;
-	dlm_ctxt *new_ctxt = NULL;
+	struct dlm_ctxt *dlm = NULL;
+	struct dlm_ctxt *new_ctxt = NULL;
 
 	if (strlen(domain) > O2NM_MAX_NAME_LEN) {
 		mlog(ML_ERROR, "domain name length too long\n");
@@ -1350,21 +1349,21 @@
 	int status = 0;
 
 	status = o2net_register_handler(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY,
-					sizeof(dlm_query_join_request),
+					sizeof(struct dlm_query_join_request),
 					dlm_query_join_handler,
 					NULL, &dlm_join_handlers);
 	if (status)
 		goto bail;
 
 	status = o2net_register_handler(DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY,
-					sizeof(dlm_assert_joined),
+					sizeof(struct dlm_assert_joined),
 					dlm_assert_joined_handler,
 					NULL, &dlm_join_handlers);
 	if (status)
 		goto bail;
 
 	status = o2net_register_handler(DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY,
-					sizeof(dlm_cancel_join),
+					sizeof(struct dlm_cancel_join),
 					dlm_cancel_join_handler,
 					NULL, &dlm_join_handlers);
 
@@ -1376,7 +1375,7 @@
 }
 
 /* Domain eviction callback handling.
- * 
+ *
  * The file system requires notification of node death *before* the
 * dlm completes its recovery work, otherwise it may be able to
  * acquire locks on resources requiring recovery. Since the dlm can
@@ -1388,7 +1387,7 @@
  * periods of time. */
 static DECLARE_RWSEM(dlm_callback_sem);
 
-void dlm_fire_domain_eviction_callbacks(dlm_ctxt *dlm,
+void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm,
 					int node_num)
 {
 	struct list_head *iter;
@@ -1413,7 +1412,7 @@
 }
 EXPORT_SYMBOL_GPL(dlm_setup_eviction_cb);
 
-void dlm_register_eviction_cb(dlm_ctxt *dlm,
+void dlm_register_eviction_cb(struct dlm_ctxt *dlm,
 			      struct dlm_eviction_cb *cb)
 {
 	down_write(&dlm_callback_sem);

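For context, the eviction hooks at the tail of dlmdomain.c are consumed
roughly like this. Hedged sketch: dlm_setup_eviction_cb() and the
callback's (int, void *) shape are not visible in these hunks, so both
are assumptions here.

/* hypothetical consumer, not from the tree */
static void example_node_down(int node_num, void *data)
{
	struct dlm_ctxt *dlm = data;

	mlog(ML_NOTICE, "node %d left domain %s\n", node_num, dlm->name);
}

static void example_watch_domain(struct dlm_ctxt *dlm,
				 struct dlm_eviction_cb *cb)
{
	/* callbacks run under dlm_callback_sem and therefore must not
	 * block for long periods, per the comment above */
	dlm_setup_eviction_cb(cb, example_node_down, dlm); /* assumed */
	dlm_register_eviction_cb(dlm, cb);
}
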
Modified: trunk/fs/ocfs2/dlm/dlmdomain.h
===================================================================
--- trunk/fs/ocfs2/dlm/dlmdomain.h	2005-06-29 22:47:38 UTC (rev 2447)
+++ trunk/fs/ocfs2/dlm/dlmdomain.h	2005-06-30 00:30:29 UTC (rev 2448)
@@ -28,10 +28,10 @@
 extern spinlock_t dlm_domain_lock;
 extern struct list_head dlm_domains;
 
-dlm_ctxt * __dlm_lookup_domain(const char *domain);
-int dlm_joined(dlm_ctxt *dlm);
-int dlm_shutting_down(dlm_ctxt *dlm);
-void dlm_fire_domain_eviction_callbacks(dlm_ctxt *dlm,
+struct dlm_ctxt * __dlm_lookup_domain(const char *domain);
+int dlm_joined(struct dlm_ctxt *dlm);
+int dlm_shutting_down(struct dlm_ctxt *dlm);
+void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm,
 					int node_num);
 
 #endif

Modified: trunk/fs/ocfs2/dlm/dlmfs.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmfs.c	2005-06-29 22:47:38 UTC (rev 2447)
+++ trunk/fs/ocfs2/dlm/dlmfs.c	2005-06-30 00:30:29 UTC (rev 2448)
@@ -70,13 +70,13 @@
 
 struct workqueue_struct *user_dlm_worker;
 
-/* 
- * decodes a set of open flags into a valid lock level and a set of flags. 
+/*
+ * decodes a set of open flags into a valid lock level and a set of flags.
  * returns < 0 if we have invalid flags
  * flags which mean something to us:
  * O_RDONLY -> PRMODE level
  * O_WRONLY -> EXMODE level
- * 
+ *
  * O_NONBLOCK -> LKM_NOQUEUE
  */
 static int dlmfs_decode_open_flags(int open_flags,

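The dlmfs.c hunk only reflows the comment, but the mapping it documents
is compact enough to spell out. A sketch of what
dlmfs_decode_open_flags() is described as doing (its real body is
outside this diff; treating O_RDWR like O_WRONLY is an assumption):

static int example_decode_open_flags(int open_flags, int *level,
				     int *flags)
{
	if (open_flags & (O_WRONLY | O_RDWR))	/* writers take EX */
		*level = LKM_EXMODE;
	else					/* O_RDONLY maps to PR */
		*level = LKM_PRMODE;

	*flags = 0;
	if (open_flags & O_NONBLOCK)		/* fail fast, don't queue */
		*flags |= LKM_NOQUEUE;

	return 0;	/* invalid-flag error path elided */
}
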
Modified: trunk/fs/ocfs2/dlm/dlmlock.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmlock.c	2005-06-29 22:47:38 UTC (rev 2447)
+++ trunk/fs/ocfs2/dlm/dlmlock.c	2005-06-30 00:30:29 UTC (rev 2448)
@@ -56,10 +56,11 @@
 static spinlock_t dlm_cookie_lock = SPIN_LOCK_UNLOCKED;
 static u64 dlm_next_cookie = 1;
 
-static dlm_status dlm_send_remote_lock_request(dlm_ctxt *dlm, 
-					       dlm_lock_resource *res, 
-					       dlm_lock *lock, int flags);
-static void dlm_init_lock(dlm_lock *newlock, int type, u8 node, u64 cookie);
+static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
+					       struct dlm_lock_resource *res,
+					       struct dlm_lock *lock, int flags);
+static void dlm_init_lock(struct dlm_lock *newlock, int type,
+			  u8 node, u64 cookie);
 static void dlm_lock_release(struct kref *kref);
 
 /* Tell us whether we can grant a new lock request.
@@ -69,21 +70,21 @@
  *   held on exit:  none
  * returns: 1 if the lock can be granted, 0 otherwise.
  */
-static int dlm_can_grant_new_lock(dlm_lock_resource *res,
-				  dlm_lock *lock)
+static int dlm_can_grant_new_lock(struct dlm_lock_resource *res,
+				  struct dlm_lock *lock)
 {
 	struct list_head *iter;
-	dlm_lock *tmplock;
+	struct dlm_lock *tmplock;
 
 	list_for_each(iter, &res->granted) {
-		tmplock = list_entry(iter, dlm_lock, list);
+		tmplock = list_entry(iter, struct dlm_lock, list);
 
 		if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
 			return 0;
 	}
 
 	list_for_each(iter, &res->converting) {
-		tmplock = list_entry(iter, dlm_lock, list);
+		tmplock = list_entry(iter, struct dlm_lock, list);
 
 		if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
 			return 0;
@@ -99,19 +100,20 @@
  *   held on exit:  none
  * returns: DLM_NORMAL, DLM_NOTQUEUED
  */
-static dlm_status dlmlock_master(dlm_ctxt *dlm, dlm_lock_resource *res, 
-				 dlm_lock *lock, int flags)
+static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm,
+				      struct dlm_lock_resource *res,
+				      struct dlm_lock *lock, int flags)
 {
 	int call_ast = 0, kick_thread = 0;
-	dlm_status status = DLM_NORMAL;
+	enum dlm_status status = DLM_NORMAL;
 
 	mlog_entry("type=%d\n", lock->ml.type);
 
 	spin_lock(&res->spinlock);
-	/* if called from dlm_create_lock_handler, need to 
+	/* if called from dlm_create_lock_handler, need to
 	 * ensure it will not sleep in dlm_wait_on_lockres */
 	status = __dlm_lockres_state_to_status(res);
-	if (status != DLM_NORMAL && 
+	if (status != DLM_NORMAL &&
 	    lock->ml.node != dlm->node_num) {
 		/* erf.  state changed after lock was dropped. */
 		spin_unlock(&res->spinlock);
@@ -128,11 +130,11 @@
 		dlm_lock_get(lock);
 		list_add_tail(&lock->list, &res->granted);
 
-		/* for the recovery lock, we can't allow the ast 
+		/* for the recovery lock, we can't allow the ast
 		 * to be queued since the dlmthread is already
 		 * frozen.  but the recovery lock is always locked
 		 * with LKM_NOQUEUE so we do not need the ast in
-		 * this special case */ 
+		 * this special case */
 		if (!dlm_is_recovery_lock(res->lockname.name,
 					  res->lockname.len)) {
 			kick_thread = 1;
@@ -156,7 +158,7 @@
 	/* either queue the ast or release it */
 	if (call_ast)
 		dlm_queue_ast(dlm, lock);
-	else 
+	else
 		dlm_lockres_release_ast(res);
 
 	dlm_lockres_calc_usage(dlm, res);
@@ -166,7 +168,8 @@
 	return status;
 }
 
-void dlm_revert_pending_lock(dlm_lock_resource *res, dlm_lock *lock)
+void dlm_revert_pending_lock(struct dlm_lock_resource *res,
+			     struct dlm_lock *lock)
 {
 	/* remove from local queue if it failed */
 	list_del_init(&lock->list);
@@ -174,17 +177,18 @@
 }
 
 
-/* 
+/*
  * locking:
  *   caller needs:  none
  *   taken:         takes and drops res->spinlock
  *   held on exit:  none
  * returns: DLM_DENIED, DLM_RECOVERING, or net status
  */
-static dlm_status dlmlock_remote(dlm_ctxt *dlm, dlm_lock_resource *res, 
-				 dlm_lock *lock, int flags)
+static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
+				      struct dlm_lock_resource *res,
+				      struct dlm_lock *lock, int flags)
 {
-	dlm_status status = DLM_DENIED;
+	enum dlm_status status = DLM_DENIED;
 
 	mlog_entry("type=%d\n", lock->ml.type);
 	mlog(0, "lockres %.*s, flags = 0x%x\n", res->lockname.len,
@@ -195,14 +199,14 @@
 	/* will exit this call with spinlock held */
 	__dlm_wait_on_lockres(res);
 	res->state |= DLM_LOCK_RES_IN_PROGRESS;
-	
+
 	/* add lock to local (secondary) queue */
 	dlm_lock_get(lock);
 	list_add_tail(&lock->list, &res->blocked);
 	lock->lock_pending = 1;
 	spin_unlock(&res->spinlock);
 
-	/* spec seems to say that you will get DLM_NORMAL when the lock 
+	/* spec seems to say that you will get DLM_NORMAL when the lock
 	 * has been queued, meaning we need to wait for a reply here. */
 	status = dlm_send_remote_lock_request(dlm, res, lock, flags);
 
@@ -229,13 +233,13 @@
  *   held on exit:  none
  * returns: DLM_NOLOCKMGR, or net status
  */
-static dlm_status dlm_send_remote_lock_request(dlm_ctxt *dlm, 
-					       dlm_lock_resource *res, 
-					       dlm_lock *lock, int flags)
+static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
+					       struct dlm_lock_resource *res,
+					       struct dlm_lock *lock, int flags)
 {
-	dlm_create_lock create;
+	struct dlm_create_lock create;
 	int tmpret, status = 0;
-	dlm_status ret;
+	enum dlm_status ret;
 
 	mlog_entry_void();
 
@@ -267,22 +271,22 @@
 	return ret;
 }
 
-void dlm_lock_get(dlm_lock *lock)
+void dlm_lock_get(struct dlm_lock *lock)
 {
 	kref_get(&lock->lock_refs);
 }
 
-void dlm_lock_put(dlm_lock *lock)
+void dlm_lock_put(struct dlm_lock *lock)
 {
 	kref_put(&lock->lock_refs, dlm_lock_release);
 }
 
 static void dlm_lock_release(struct kref *kref)
 {
-	dlm_lock *lock;
-	dlm_lockstatus *lksb;
+	struct dlm_lock *lock;
+	struct dlm_lockstatus *lksb;
 
-	lock = container_of(kref, dlm_lock, lock_refs);
+	lock = container_of(kref, struct dlm_lock, lock_refs);
 
 	lksb = lock->lksb;
 	BUG_ON(lksb->lockid != lock);
@@ -293,7 +297,7 @@
 	BUG_ON(lock->bast_pending);
 
 	dlm_lock_detach_lockres(lock);
-	
+
 	if (lksb->flags & DLM_LKSB_KERNEL_ALLOCATED) {
 		mlog(0, "freeing kernel-allocated lksb\n");
 		kfree(lksb);
@@ -305,17 +309,18 @@
 }
 
 /* associate a lock with its lockres, getting a ref on the lockres */
-void dlm_lock_attach_lockres(dlm_lock *lock, dlm_lock_resource *res)
+void dlm_lock_attach_lockres(struct dlm_lock *lock,
+			     struct dlm_lock_resource *res)
 {
 	dlm_lockres_get(res);
 	lock->lockres = res;
 }
 
 /* drop ref on lockres, if there is still one associated with lock */
-void dlm_lock_detach_lockres(dlm_lock *lock)
+void dlm_lock_detach_lockres(struct dlm_lock *lock)
 {
-	dlm_lock_resource *res;
-	
+	struct dlm_lock_resource *res;
+
 	res = lock->lockres;
 	if (res) {
 		lock->lockres = NULL;
@@ -324,7 +329,8 @@
 	}
 }
 
-static void dlm_init_lock(dlm_lock *newlock, int type, u8 node, u64 cookie)
+static void dlm_init_lock(struct dlm_lock *newlock, int type,
+			  u8 node, u64 cookie)
 {
 	INIT_LIST_HEAD(&newlock->list);
 	INIT_LIST_HEAD(&newlock->ast_list);
@@ -351,14 +357,15 @@
 	kref_init(&newlock->lock_refs, dlm_lock_release);
 }
 
-dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie, dlm_lockstatus *lksb)
+struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
+			       struct dlm_lockstatus *lksb)
 {
-	dlm_lock *lock;
-	
+	struct dlm_lock *lock;
+
 	lock = kcalloc(1, sizeof(*lock), GFP_KERNEL);
 	if (!lock)
 		return NULL;
-	
+
 	if (!lksb) {
 		/* zero memory only if kernel-allocated */
 		lksb = kcalloc(1, sizeof(*lksb), GFP_KERNEL);
@@ -384,12 +391,12 @@
  */
 int dlm_create_lock_handler(o2net_msg *msg, u32 len, void *data)
 {
-	dlm_ctxt *dlm = data;
-	dlm_create_lock *create = (dlm_create_lock *)msg->buf;
-	dlm_lock_resource *res = NULL;
-	dlm_lock *newlock = NULL;
-	dlm_lockstatus *lksb = NULL;
-	dlm_status status = DLM_NORMAL;
+	struct dlm_ctxt *dlm = data;
+	struct dlm_create_lock *create = (struct dlm_create_lock *)msg->buf;
+	struct dlm_lock_resource *res = NULL;
+	struct dlm_lock *newlock = NULL;
+	struct dlm_lockstatus *lksb = NULL;
+	enum dlm_status status = DLM_NORMAL;
 	char *name;
 	unsigned int namelen;
 
@@ -400,7 +407,7 @@
 	if (!dlm_grab(dlm))
 		return DLM_REJECTED;
 
-	mlog_bug_on_msg(!dlm_domain_fully_joined(dlm), 
+	mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
 			"Domain %s not fully joined!\n", dlm->name);
 
 	dlm_create_lock_to_host(create);
@@ -412,8 +419,8 @@
 		goto leave;
 
 	status = DLM_SYSERR;
-	newlock = dlm_new_lock(create->requested_type, 
-			       create->node_idx, 
+	newlock = dlm_new_lock(create->requested_type,
+			       create->node_idx,
 			       create->cookie, NULL);
 	if (!newlock)
 		goto leave;
@@ -433,14 +440,14 @@
 	spin_lock(&res->spinlock);
 	status = __dlm_lockres_state_to_status(res);
 	spin_unlock(&res->spinlock);
-		
+
 	if (status != DLM_NORMAL) {
 		mlog(0, "lockres recovering/migrating/in-progress\n");
 		goto leave;
 	}
 
 	dlm_lock_attach_lockres(newlock, res);
-	
+
 	status = dlmlock_master(dlm, res, newlock, create->flags);
 leave:
 	if (status != DLM_NORMAL)
@@ -473,17 +480,18 @@
 	spin_unlock(&dlm_cookie_lock);
 }
 
-dlm_status dlmlock(dlm_ctxt *dlm, int mode, dlm_lockstatus *lksb, int flags, 
-		   const char *name, dlm_astlockfunc_t *ast, void *data, 
-		   dlm_bastlockfunc_t *bast)
+enum dlm_status dlmlock(struct dlm_ctxt *dlm, int mode,
+			struct dlm_lockstatus *lksb, int flags,
+			const char *name, dlm_astlockfunc_t *ast, void *data,
+			dlm_bastlockfunc_t *bast)
 {
-	dlm_status status;
-	dlm_lock_resource *res = NULL;
-	dlm_lock *lock = NULL;
+	enum dlm_status status;
+	struct dlm_lock_resource *res = NULL;
+	struct dlm_lock *lock = NULL;
 	int convert = 0, recovery = 0;
 
-	/* yes this function is a mess.  
-	 * TODO: clean this up.  lots of common code in the 
+	/* yes this function is a mess.
+	 * TODO: clean this up.  lots of common code in the
 	 *       lock and convert paths, especially in the retry blocks */
 	if (!lksb)
 		return DLM_BADARGS;
@@ -498,7 +506,7 @@
 	convert = (flags & LKM_CONVERT);
 	recovery = (flags & LKM_RECOVERY);
 
-	if (recovery && 
+	if (recovery &&
 	    (!dlm_is_recovery_lock(name, strlen(name)) || convert) ) {
 		goto error;
 	}
@@ -517,7 +525,7 @@
 			     "request\n");
 			goto error;
 		}
-		
+
 		res = lock->lockres;
 		if (!res) {
 			mlog(ML_ERROR, "NULL lockres pointer in convert "
@@ -526,8 +534,8 @@
 		}
 		dlm_lockres_get(res);
 
-		/* XXX: for ocfs2 purposes, the ast/bast/astdata/lksb are 
-	 	 * static after the original lock call.  convert requests will 
+		/* XXX: for ocfs2 purposes, the ast/bast/astdata/lksb are
+	 	 * static after the original lock call.  convert requests will
 		 * ensure that everything is the same, or return DLM_BADARGS.
 	 	 * this means that DLM_DENIED_NOASTS will never be returned.
 	 	 */
@@ -537,7 +545,7 @@
 			mlog(ML_ERROR, "new args:  lksb=%p, ast=%p, bast=%p, "
 			     "astdata=%p\n", lksb, ast, bast, data);
 			mlog(ML_ERROR, "orig args: lksb=%p, ast=%p, bast=%p, "
-			     "astdata=%p\n", lock->lksb, lock->ast, 
+			     "astdata=%p\n", lock->lksb, lock->ast,
 			     lock->bast, lock->astdata);
 			goto error;
 		}
@@ -546,7 +554,7 @@
 
 		if (res->owner == dlm->node_num)
 			status = dlmconvert_master(dlm, res, lock, flags, mode);
-		else 
+		else
 			status = dlmconvert_remote(dlm, res, lock, flags, mode);
 		if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
 		    status == DLM_FORWARD) {
@@ -610,7 +618,7 @@
 
 		if (res->owner == dlm->node_num)
 			status = dlmlock_master(dlm, res, lock, flags);
-		else 
+		else
 			status = dlmlock_remote(dlm, res, lock, flags);
 
 		if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
@@ -636,7 +644,7 @@
 		lksb->status = status;
 	}
 
-	/* put lockres ref from the convert path 
+	/* put lockres ref from the convert path
 	 * or from dlm_get_lock_resource */
 	if (res)
 		dlm_lockres_put(res);

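To close out dlmlock.c, a caller's-eye sketch of the reworked dlmlock()
entry point above. The lock name, mode and callbacks are illustrative,
and the dlm_astlockfunc_t/dlm_bastlockfunc_t shapes (from dlmapi.h, not
in these hunks) are assumed:

static void example_ast(void *astdata)
{
	/* granted; lksb->status carries the enum dlm_status */
}

static void example_bast(void *astdata, int blocked_type)
{
	/* another node is blocked on us; downconvert when possible */
}

static enum dlm_status example_take_lock(struct dlm_ctxt *dlm,
					 struct dlm_lockstatus *lksb)
{
	enum dlm_status status;

	memset(lksb, 0, sizeof(*lksb));
	status = dlmlock(dlm, LKM_EXMODE, lksb, 0, "example_lock",
			 example_ast, NULL, example_bast);
	if (status != DLM_NORMAL)
		mlog(ML_ERROR, "dlmlock: %s\n", dlm_errname(status));
	return status;
}
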
Modified: trunk/fs/ocfs2/dlm/dlmmaster.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmmaster.c	2005-06-29 22:47:38 UTC (rev 2447)
+++ trunk/fs/ocfs2/dlm/dlmmaster.c	2005-06-30 00:30:29 UTC (rev 2448)
@@ -52,11 +52,11 @@
 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
 #include "cluster/masklog.h"
 
-static void dlm_dump_mles(dlm_ctxt *dlm);
+static void dlm_dump_mles(struct dlm_ctxt *dlm);
 
-static void dlm_dump_mles(dlm_ctxt *dlm)
+static void dlm_dump_mles(struct dlm_ctxt *dlm)
 {
-	dlm_master_list_entry *mle;
+	struct dlm_master_list_entry *mle;
 	struct list_head *iter;
 	int i = 0, refs;
 	char *type;
@@ -71,14 +71,14 @@
 
 	list_for_each(iter, &dlm->master_list) {
 		struct kref *k;
-		mle = list_entry(iter, dlm_master_list_entry, list);
-		
+		mle = list_entry(iter, struct dlm_master_list_entry, list);
+
 		k = &mle->mle_refs;
 		type = (mle->type == DLM_MLE_BLOCK ? "BLK" : "MAS");
 		refs = atomic_read(&k->refcount);
 		master = mle->master;
 		attached = (list_empty(&mle->hb_events) ? 'N' : 'Y');
-	
+
 		if (mle->type == DLM_MLE_BLOCK) {
 			namelen = mle->u.name.len;
 			name = mle->u.name.name;
@@ -88,7 +88,7 @@
 		}
 
 		mlog(ML_NOTICE, "  #%3d: %3s  %3d  %3u   %c    (%d)%.*s\n",
-			  i, type, refs, master, attached, 
+			  i, type, refs, master, attached,
 			  namelen, namelen, name);
 	}
 
@@ -101,11 +101,11 @@
 int dlm_dump_all_mles(const char __user *data, unsigned int len)
 {
 	struct list_head *iter;
-	dlm_ctxt *dlm;
+	struct dlm_ctxt *dlm;
 
 	spin_lock(&dlm_domain_lock);
 	list_for_each(iter, &dlm_domains) {
-		dlm = list_entry (iter, dlm_ctxt, list);
+		dlm = list_entry (iter, struct dlm_ctxt, list);
 		mlog(ML_NOTICE, "found dlm: %p, name=%s\n", dlm, dlm->name);
 		dlm_dump_mles(dlm);
 	}
@@ -119,35 +119,42 @@
 
 
 static void dlm_mle_release(struct kref *kref);
-static void dlm_init_mle(dlm_master_list_entry *mle,
+static void dlm_init_mle(struct dlm_master_list_entry *mle,
 			enum dlm_mle_type type,
-			dlm_ctxt *dlm,
-			dlm_lock_resource *res,
+			struct dlm_ctxt *dlm,
+			struct dlm_lock_resource *res,
 			const char *name,
 			unsigned int namelen);
-static void dlm_put_mle(dlm_master_list_entry *mle);
-static void __dlm_put_mle(dlm_master_list_entry *mle);
-static int dlm_find_mle(dlm_ctxt *dlm, dlm_master_list_entry **mle,
+static void dlm_put_mle(struct dlm_master_list_entry *mle);
+static void __dlm_put_mle(struct dlm_master_list_entry *mle);
+static int dlm_find_mle(struct dlm_ctxt *dlm,
+			struct dlm_master_list_entry **mle,
 			char *name, unsigned int namelen);
 
-static int dlm_do_master_request(dlm_master_list_entry *mle, int to);
+static int dlm_do_master_request(struct dlm_master_list_entry *mle, int to);
 
 
-static int dlm_wait_for_lock_mastery(dlm_ctxt *dlm, dlm_lock_resource *res, 
-				     dlm_master_list_entry *mle,
+static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
+				     struct dlm_lock_resource *res,
+				     struct dlm_master_list_entry *mle,
 				     int blocked);
-static int dlm_restart_lock_mastery(dlm_ctxt *dlm, dlm_lock_resource *res,
-				    dlm_master_list_entry *mle, int blocked);
-static int dlm_add_migration_mle(dlm_ctxt *dlm, 
-				 dlm_lock_resource *res, 
-				 dlm_master_list_entry *mle, 
-				 dlm_master_list_entry **oldmle, 
+static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
+				    struct dlm_lock_resource *res,
+				    struct dlm_master_list_entry *mle,
+				    int blocked);
+static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
+				 struct dlm_lock_resource *res,
+				 struct dlm_master_list_entry *mle,
+				 struct dlm_master_list_entry **oldmle,
 				 const char *name, unsigned int namelen,
 				 u8 new_master, u8 master);
 
-static u8 dlm_pick_migration_target(dlm_ctxt *dlm, dlm_lock_resource *res);
-static void dlm_remove_nonlocal_locks(dlm_ctxt *dlm, dlm_lock_resource *res);
-static void dlm_mark_lockres_migrating(dlm_ctxt *dlm, dlm_lock_resource *res);
+static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
+				    struct dlm_lock_resource *res);
+static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
+				      struct dlm_lock_resource *res);
+static void dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
+				       struct dlm_lock_resource *res);
 
 
 int dlm_is_host_down(int errno)
@@ -195,8 +202,8 @@
  * the mle once an "answer" regarding the lock master has been
  * received.
  */
-static inline void __dlm_mle_attach_hb_events(dlm_ctxt *dlm, 
-					      dlm_master_list_entry *mle)
+static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
+					      struct dlm_master_list_entry *mle)
 {
 	assert_spin_locked(&dlm->spinlock);
 
@@ -204,16 +211,16 @@
 }
 
 
-static inline void __dlm_mle_detach_hb_events(dlm_ctxt *dlm,
-					      dlm_master_list_entry *mle)
+static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
+					      struct dlm_master_list_entry *mle)
 {
 	if (!list_empty(&mle->hb_events))
 		list_del_init(&mle->hb_events);
 }
 
 
-static inline void dlm_mle_detach_hb_events(dlm_ctxt *dlm,
-					    dlm_master_list_entry *mle)
+static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
+					    struct dlm_master_list_entry *mle)
 {
 	spin_lock(&dlm->spinlock);
 	__dlm_mle_detach_hb_events(dlm, mle);
@@ -221,9 +228,9 @@
 }
 
 /* remove from list and free */
-static void __dlm_put_mle(dlm_master_list_entry *mle)
+static void __dlm_put_mle(struct dlm_master_list_entry *mle)
 {
-	dlm_ctxt *dlm;
+	struct dlm_ctxt *dlm;
 	dlm = mle->dlm;
 
 	assert_spin_locked(&dlm->spinlock);
@@ -234,9 +241,9 @@
 
 
 /* must not have any spinlocks coming in */
-static void dlm_put_mle(dlm_master_list_entry *mle)
+static void dlm_put_mle(struct dlm_master_list_entry *mle)
 {
-	dlm_ctxt *dlm;
+	struct dlm_ctxt *dlm;
 	dlm = mle->dlm;
 
 	spin_lock(&dlm->spinlock);
@@ -246,20 +253,20 @@
 	spin_unlock(&dlm->spinlock);
 }
 
-static inline void dlm_get_mle(dlm_master_list_entry *mle)
+static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
 {
 	kref_get(&mle->mle_refs);
 }
 
-static void dlm_init_mle(dlm_master_list_entry *mle,
+static void dlm_init_mle(struct dlm_master_list_entry *mle,
 			enum dlm_mle_type type,
-			dlm_ctxt *dlm,
-			dlm_lock_resource *res,
+			struct dlm_ctxt *dlm,
+			struct dlm_lock_resource *res,
 			const char *name,
 			unsigned int namelen)
 {
 	assert_spin_locked(&dlm->spinlock);
-	
+
 	mle->dlm = dlm;
 	mle->type = type;
 	INIT_LIST_HEAD(&mle->list);
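
The mle lifetime in the hunks above is kref-based: dlm_get_mle() takes a
reference, and the put paths drop it under dlm->spinlock so the release
callback can unlink safely.  The same get/put/release pattern reduced to a
self-contained sketch (names are illustrative):

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    struct example_entry {
            struct kref refs;
    };

    static void example_release(struct kref *kref)
    {
            /* map the embedded kref back to its container, then free */
            struct example_entry *e =
                    container_of(kref, struct example_entry, refs);
            kfree(e);
    }

    static inline void example_get(struct example_entry *e)
    {
            kref_get(&e->refs);
    }

    static void example_put(struct example_entry *e)
    {
            /* example_release() runs only on the final put */
            kref_put(&e->refs, example_release);
    }
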
@@ -298,16 +305,17 @@
 
 
 /* returns 1 if found, 0 if not */
-static int dlm_find_mle(dlm_ctxt *dlm, dlm_master_list_entry **mle,
+static int dlm_find_mle(struct dlm_ctxt *dlm,
+			struct dlm_master_list_entry **mle,
 			char *name, unsigned int namelen)
 {
-	dlm_master_list_entry *tmpmle;
+	struct dlm_master_list_entry *tmpmle;
 	struct list_head *iter;
 
 	assert_spin_locked(&dlm->master_lock);
-		
+
 	list_for_each(iter, &dlm->master_list) {
-		tmpmle = list_entry(iter, dlm_master_list_entry, list);
+		tmpmle = list_entry(iter, struct dlm_master_list_entry, list);
 		if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
 			continue;
 		dlm_get_mle(tmpmle);
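
dlm_find_mle() above is the stock kernel list walk; with the typedef gone,
list_entry() must now be handed the struct tag.  The idiom in isolation
(illustrative names):

    #include <linux/list.h>

    struct example_entry {
            struct list_head list;
            int id;
    };

    static struct example_entry *example_find(struct list_head *head, int id)
    {
            struct list_head *iter;
            struct example_entry *e;

            list_for_each(iter, head) {
                    /* list_entry() is container_of(): from the embedded
                     * list_head back to the enclosing structure */
                    e = list_entry(iter, struct example_entry, list);
                    if (e->id == id)
                            return e;
            }
            return NULL;
    }
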
@@ -318,7 +326,7 @@
 }
 
 
-void dlm_mle_node_down(dlm_ctxt *dlm, dlm_master_list_entry *mle,
+void dlm_mle_node_down(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle,
 		       struct o2nm_node *node, int idx)
 {
 	spin_lock(&mle->spinlock);
@@ -328,7 +336,7 @@
 	else
 		clear_bit(idx, mle->node_map);
 
-#if 0	
+#if 0
 	if (test_bit(idx, mle->recovery_map))
 		mlog(0, "node %u already added to recovery map!\n", idx);
 	else
@@ -337,21 +345,21 @@
 	spin_unlock(&mle->spinlock);
 }
 
-void dlm_mle_node_up(dlm_ctxt *dlm, dlm_master_list_entry *mle,
+void dlm_mle_node_up(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle,
 		     struct o2nm_node *node, int idx)
 {
 	spin_lock(&mle->spinlock);
 
-#if 0	
+#if 0
 	if (test_bit(idx, mle->recovery_map))
 		mlog(ML_ERROR, "node up message on node in recovery (%u)!\n",
 		     idx);
-	else 
+	else
 #endif
 	{
 		if (test_bit(idx, mle->node_map))
 			mlog(0, "node %u already in node map!\n", idx);
-		else 
+		else
 			set_bit(idx, mle->node_map);
 	}
 
@@ -361,9 +369,9 @@
 
 int dlm_init_mle_cache(void)
 {
-	dlm_mle_cache = kmem_cache_create("dlm_mle_cache", 
-					  sizeof(dlm_master_list_entry), 
-					  0, SLAB_HWCACHE_ALIGN, 
+	dlm_mle_cache = kmem_cache_create("dlm_mle_cache",
+					  sizeof(struct dlm_master_list_entry),
+					  0, SLAB_HWCACHE_ALIGN,
 					  NULL, NULL);
 	if (dlm_mle_cache == NULL)
 		return -ENOMEM;
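
dlm_init_mle_cache() uses the six-argument kmem_cache_create() of this
kernel generation (constructor and destructor both NULL); the sizeof()
argument is another spot where the typedef removal shows.  The full
create/alloc/free cycle as a sketch, with illustrative names:

    #include <linux/slab.h>

    struct example_entry { int id; };

    static kmem_cache_t *example_cache;

    static int example_cache_init(void)
    {
            example_cache = kmem_cache_create("example_cache",
                                              sizeof(struct example_entry),
                                              0, SLAB_HWCACHE_ALIGN,
                                              NULL, NULL);
            if (example_cache == NULL)
                    return -ENOMEM;
            return 0;
    }

    static void example_cache_use(void)
    {
            struct example_entry *e;

            e = kmem_cache_alloc(example_cache, GFP_KERNEL);
            if (e)
                    kmem_cache_free(example_cache, e);
    }
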
@@ -378,20 +386,20 @@
 
 static void dlm_mle_release(struct kref *kref)
 {
-	dlm_master_list_entry *mle;
-	dlm_ctxt *dlm;
+	struct dlm_master_list_entry *mle;
+	struct dlm_ctxt *dlm;
 
 	mlog_entry_void();
 
-	mle = container_of(kref, dlm_master_list_entry, mle_refs);
+	mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
 	dlm = mle->dlm;
 
 	if (mle->type == DLM_MLE_BLOCK) {
-		mlog(0, "calling mle_release for %.*s, type %d\n", 
+		mlog(0, "calling mle_release for %.*s, type %d\n",
 		     mle->u.name.len, mle->u.name.name, mle->type);
 	} else {
-		mlog(0, "calling mle_release for %.*s, type %d\n", 
-		     mle->u.res->lockname.len, 
+		mlog(0, "calling mle_release for %.*s, type %d\n",
+		     mle->u.res->lockname.len,
 		     mle->u.res->lockname.name, mle->type);
 	}
 	assert_spin_locked(&dlm->spinlock);
@@ -414,8 +422,8 @@
  * LOCK RESOURCE FUNCTIONS
  */
 
-static void dlm_set_lockres_owner(dlm_ctxt *dlm,
-				  dlm_lock_resource *res,
+static void dlm_set_lockres_owner(struct dlm_ctxt *dlm,
+				  struct dlm_lock_resource *res,
 				  u8 owner)
 {
 	assert_spin_locked(&res->spinlock);
@@ -432,7 +440,8 @@
 	res->owner = owner;
 }
 
-void dlm_change_lockres_owner(dlm_ctxt *dlm, dlm_lock_resource *res, u8 owner)
+void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
+			      struct dlm_lock_resource *res, u8 owner)
 {
 	assert_spin_locked(&res->spinlock);
 
@@ -452,10 +461,10 @@
 
 static void dlm_lockres_release(struct kref *kref)
 {
-	dlm_lock_resource *res;
-	
-	res = container_of(kref, dlm_lock_resource, refs);
+	struct dlm_lock_resource *res;
 
+	res = container_of(kref, struct dlm_lock_resource, refs);
+
 	/* This should not happen -- all lockres' have a name
 	 * associated with them at init time. */
 	BUG_ON(!res->lockname.name);
@@ -478,17 +487,17 @@
 	kfree(res);
 }
 
-void dlm_lockres_get(dlm_lock_resource *res)
+void dlm_lockres_get(struct dlm_lock_resource *res)
 {
 	kref_get(&res->refs);
 }
 
-void dlm_lockres_put(dlm_lock_resource *res)
+void dlm_lockres_put(struct dlm_lock_resource *res)
 {
 	kref_put(&res->refs, dlm_lockres_release);
 }
 
-void dlm_init_lockres(dlm_ctxt *dlm, dlm_lock_resource *res, 
+void dlm_init_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
 		      const char *name, unsigned int namelen)
 {
 	char *qname;
@@ -521,7 +530,7 @@
 	spin_lock(&res->spinlock);
 	dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
 	spin_unlock(&res->spinlock);
-	
+
 	res->state = DLM_LOCK_RES_IN_PROGRESS;
 
 	res->last_used = 0;
@@ -529,13 +538,13 @@
 	memset(res->lvb, 0, DLM_LVB_LEN);
 }
 
-dlm_lock_resource *dlm_new_lockres(dlm_ctxt *dlm, 
-				   const char *name, 
+struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
+				   const char *name,
 				   unsigned int namelen)
 {
-	dlm_lock_resource *res;
+	struct dlm_lock_resource *res;
 
-	res = kmalloc(sizeof(dlm_lock_resource), GFP_KERNEL);
+	res = kmalloc(sizeof(struct dlm_lock_resource), GFP_KERNEL);
 	if (!res)
 		return NULL;
 
@@ -553,10 +562,10 @@
  * lookup a lock resource by name.
  * may already exist in the hashtable.
  * lockid is null terminated
- * 
+ *
  * if not, allocate enough for the lockres and for
  * the temporary structure used in doing the mastering.
- * 
+ *
  * also, do a lookup in the dlm->master_list to see
  * if another node has begun mastering the same lock.
  * if so, there should be a block entry in there
@@ -565,15 +574,15 @@
  * to assert_master (or die).
  *
  */
-dlm_lock_resource * dlm_get_lock_resource(dlm_ctxt *dlm, 
+struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
 					  const char *lockid,
 					  int flags)
 {
-	dlm_lock_resource *tmpres=NULL, *res=NULL;
-	dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
+	struct dlm_lock_resource *tmpres=NULL, *res=NULL;
+	struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
 	int blocked = 0;
 	int ret, nodenum;
-	dlm_node_iter iter;
+	struct dlm_node_iter iter;
 	unsigned int namelen;
 
 	BUG_ON(!lockid);
@@ -599,7 +608,7 @@
 		spin_unlock(&dlm->spinlock);
 		mlog(0, "allocating a new resource\n");
 		/* nothing found and we need to allocate one. */
-		mle = (dlm_master_list_entry *)kmem_cache_alloc(dlm_mle_cache,
+		mle = (struct dlm_master_list_entry *)kmem_cache_alloc(dlm_mle_cache,
 								GFP_KERNEL);
 		if (!mle)
 			return NULL;
@@ -638,9 +647,9 @@
 			BUG();
 		} else if (tmpmle->type == DLM_MLE_MIGRATION) {
 			/* migration is in progress! */
-			/* the good news is that we now know the 
+			/* the good news is that we now know the
 			 * "current" master (mle->master). */
-		
+
 			spin_unlock(&dlm->master_lock);
 			assert_spin_locked(&dlm->spinlock);
 
@@ -714,7 +723,7 @@
 		     "master request now\n");
 		goto redo_request;
 	}
-	
+
 	mlog(0, "lockres mastered by %u\n", res->owner);
 	/* make sure we never continue without this */
 	BUG_ON(res->owner == O2NM_MAX_NODES);
@@ -735,8 +744,9 @@
 
 #define DLM_MASTERY_TIMEOUT_MS   5000
 
-static int dlm_wait_for_lock_mastery(dlm_ctxt *dlm, dlm_lock_resource *res, 
-				     dlm_master_list_entry *mle,
+static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
+				     struct dlm_lock_resource *res,
+				     struct dlm_master_list_entry *mle,
 				     int blocked)
 {
 	u8 m;
@@ -758,7 +768,7 @@
 
 	spin_lock(&mle->spinlock);
 	m = mle->master;
-	map_changed = (memcmp(mle->vote_map, mle->node_map, 
+	map_changed = (memcmp(mle->vote_map, mle->node_map,
 			      sizeof(mle->vote_map)) != 0);
 	voting_done = (memcmp(mle->vote_map, mle->response_map,
 			     sizeof(mle->vote_map)) == 0);
@@ -787,7 +797,7 @@
 			bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
 			if (dlm->node_num <= bit) {
 				/* my node number is lowest.
-			 	 * now tell other nodes that I am 
+			 	 * now tell other nodes that I am
 				 * mastering this. */
 				mle->master = dlm->node_num;
 				assert = 1;
@@ -806,13 +816,13 @@
 
 		/*
 		if (atomic_read(&mle->mle_refs.refcount) < 2)
-			mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle, 
-			atomic_read(&mle->mle_refs.refcount), 
+			mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
+			atomic_read(&mle->mle_refs.refcount),
 			res->lockname.len, res->lockname.name);
 		*/
 		atomic_set(&mle->woken, 0);
-		(void)wait_event_timeout(mle->wq, 
-					 (atomic_read(&mle->woken) == 1), 
+		(void)wait_event_timeout(mle->wq,
+					 (atomic_read(&mle->woken) == 1),
 					 timeo);
 		if (res->owner == O2NM_MAX_NODES) {
 			mlog(0, "waiting again\n");
@@ -823,10 +833,10 @@
 		goto leave;
 	}
 
-	ret = 0;   /* done */	
+	ret = 0;   /* done */
 	if (assert) {
 		m = dlm->node_num;
-		ret = dlm_do_assert_master(dlm, res->lockname.name, 
+		ret = dlm_do_assert_master(dlm, res->lockname.name,
 					   res->lockname.len, mle->vote_map, 0);
 		if (ret) {
 			/* This is a failure in the network path,
@@ -836,7 +846,7 @@
 			 * due to node death. */
 			mlog_errno(ret);
 		}
-		/* no longer need to restart lock mastery.  
+		/* no longer need to restart lock mastery.
 		 * all living nodes have been contacted. */
 		ret = 0;
 	}
@@ -866,7 +876,7 @@
 };
 
 static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
-				      unsigned long *orig_bm, 
+				      unsigned long *orig_bm,
 				      unsigned long *cur_bm)
 {
 	unsigned long p1, p2;
@@ -891,7 +901,7 @@
 	if (iter->curnode >= O2NM_MAX_NODES)
 		return -ENOENT;
 
-	bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES, 
+	bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
 			    iter->curnode+1);
 	if (bit >= O2NM_MAX_NODES) {
 		iter->curnode = O2NM_MAX_NODES;
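
dlm_bitmap_diff_iter_next() advances with find_next_bit(), the same loop
shape used further down in dlm_pick_migration_target().  The walk in
isolation:

    #include <linux/bitops.h>

    /* call fn() for every set bit in 'map' of 'max' bits */
    static void example_for_each_bit(unsigned long *map, int max,
                                     void (*fn)(int bit))
    {
            int bit = -1;

            while (1) {
                    bit = find_next_bit(map, max, bit + 1);
                    if (bit >= max)
                            break;
                    fn(bit);
            }
    }
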
@@ -909,8 +919,10 @@
 }
 
 
-static int dlm_restart_lock_mastery(dlm_ctxt *dlm, dlm_lock_resource *res,
-				    dlm_master_list_entry *mle, int blocked)
+static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
+				    struct dlm_lock_resource *res,
+				    struct dlm_master_list_entry *mle,
+				    int blocked)
 {
 	struct dlm_bitmap_diff_iter bdi;
 	enum dlm_node_state_change sc;
@@ -926,7 +938,7 @@
 	node = dlm_bitmap_diff_iter_next(&bdi, &sc);
 	while (node >= 0) {
 		if (sc == NODE_UP) {
-			/* a node came up.  easy.  might not even need 
+			/* a node came up.  easy.  might not even need
 			 * to talk to it if its node number is higher
 			 * or if we are already blocked. */
 			mlog(0, "node up! %d\n", node);
@@ -944,7 +956,7 @@
 			set_bit(node, mle->vote_map);
 		} else {
 			mlog(ML_ERROR, "node down! %d\n", node);
-		
+
 			/* if the node wasn't involved in mastery skip it,
 			 * but clear it out from the maps so that it will
 			 * not affect mastery of this lockres */
@@ -957,7 +969,7 @@
 			 * dead node wasn't the expected master, or there is
 			 * another node in the maybe_map, keep waiting */
 			if (blocked) {
-				int lowest = find_next_bit(mle->maybe_map, 
+				int lowest = find_next_bit(mle->maybe_map,
 						       O2NM_MAX_NODES, 0);
 
 				/* act like it was never there */
@@ -967,18 +979,18 @@
 					goto next;
 
 				mlog(ML_ERROR, "expected master %u died while "
-				     "this node was blocked waiting on it!\n", 
+				     "this node was blocked waiting on it!\n",
 				     node);
-				lowest = find_next_bit(mle->maybe_map, 
-						       O2NM_MAX_NODES, 
+				lowest = find_next_bit(mle->maybe_map,
+						       O2NM_MAX_NODES,
 						       lowest+1);
 				if (lowest < O2NM_MAX_NODES) {
 					mlog(0, "still blocked. waiting "
 					     "on %u now\n", lowest);
 					goto next;
-				} 
+				}
 
-				/* mle is an MLE_BLOCK, but there is now 
+				/* mle is an MLE_BLOCK, but there is now
 				 * nothing left to block on.  we need to return
 				 * all the way back out and try again with
 				 * an MLE_MASTER. dlm_do_local_recovery_cleanup
@@ -986,9 +998,9 @@
 				mlog(0, "no longer blocking. we can "
 				     "try to master this here\n");
 				mle->type = DLM_MLE_MASTER;
-				memset(mle->maybe_map, 0, 
+				memset(mle->maybe_map, 0,
 				       sizeof(mle->maybe_map));
-				memset(mle->response_map, 0, 
+				memset(mle->response_map, 0,
 				       sizeof(mle->maybe_map));
 				memcpy(mle->vote_map, mle->node_map,
 				       sizeof(mle->node_map));
@@ -1003,9 +1015,9 @@
 				goto next;
 
 			mlog(0, "dead node in map!\n");
-			/* yuck. go back and re-contact all nodes 
+			/* yuck. go back and re-contact all nodes
 			 * in the vote_map, removing this node. */
-			memset(mle->response_map, 0, 
+			memset(mle->response_map, 0,
 			       sizeof(mle->response_map));
 		}
 		ret = -EAGAIN;
@@ -1018,18 +1030,18 @@
 
 /*
  * DLM_MASTER_REQUEST_MSG
- * 
- * returns: 0 on success, 
+ *
+ * returns: 0 on success,
  *          -errno on a network error
  *
  * on error, the caller should assume the target node is "dead"
- * 
+ *
  */
 
-static int dlm_do_master_request(dlm_master_list_entry *mle, int to)
+static int dlm_do_master_request(struct dlm_master_list_entry *mle, int to)
 {
-	dlm_ctxt *dlm = mle->dlm;
-	dlm_master_request request;
+	struct dlm_ctxt *dlm = mle->dlm;
+	struct dlm_master_request request;
 	int ret, response=0, resend;
 
 	memset(&request, 0, sizeof(request));
@@ -1042,7 +1054,7 @@
 		memcpy(request.name, mle->u.name.name, request.namelen);
 	} else {
 		request.namelen = mle->u.res->lockname.len;
-		memcpy(request.name, mle->u.res->lockname.name, 
+		memcpy(request.name, mle->u.res->lockname.name,
 			request.namelen);
 	}
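
dlm_do_master_request() builds a fixed-size request on the stack, copies
the lock name in as above, and (in lines these hunks elide) byte-swaps and
sends it.  The general shape, matching the o2net_send_message() calls
visible elsewhere in the patch; EXAMPLE_MSG and struct example_request are
illustrative, not patch symbols:

    struct example_request {
            u8 node_idx;
            u8 namelen;
            u8 name[O2NM_MAX_NAME_LEN];
    };

    static int example_send(struct dlm_ctxt *dlm, const char *name,
                            unsigned int namelen, int to)
    {
            struct example_request req;
            int ret, status = 0;

            memset(&req, 0, sizeof(req));
            req.node_idx = dlm->node_num;
            req.namelen = namelen;
            memcpy(req.name, name, namelen);

            ret = o2net_send_message(EXAMPLE_MSG, dlm->key, &req,
                                     sizeof(req), to, &status);
            if (ret < 0)
                    return ret;     /* network error: treat 'to' as dead */
            return status;          /* the remote handler's response */
    }
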
 
@@ -1126,10 +1138,10 @@
 int dlm_master_request_handler(o2net_msg *msg, u32 len, void *data)
 {
 	u8 response = DLM_MASTER_RESP_MAYBE;
-	dlm_ctxt *dlm = data;
-	dlm_lock_resource *res;
-	dlm_master_request *request = (dlm_master_request *) msg->buf;
-	dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
+	struct dlm_ctxt *dlm = data;
+	struct dlm_lock_resource *res;
+	struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
+	struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
 	char *name;
 	unsigned int namelen;
 	int found, ret;
@@ -1178,12 +1190,12 @@
 				kmem_cache_free(dlm_mle_cache, mle);
 
 			/* this node is the owner.
-			 * there is some extra work that needs to 
+			 * there is some extra work that needs to
 			 * happen now.  the requesting node has
-			 * caused all nodes up to this one to 
+			 * caused all nodes up to this one to
 			 * create mles.  this node now needs to
 			 * go back and clean those up. */
-			ret = dlm_dispatch_assert_master(dlm, res, 1, 
+			ret = dlm_dispatch_assert_master(dlm, res, 1,
 							 request->node_idx,
 							 flags);
 			if (ret < 0) {
@@ -1201,7 +1213,7 @@
 			goto send_response;
 		}
 
-		/* ok, there is no owner.  either this node is 
+		/* ok, there is no owner.  either this node is
 		 * being blocked, or it is actively trying to
 		 * master this lock. */
 		if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
@@ -1253,11 +1265,11 @@
 		goto send_response;
 	}
 
-	/* 
-	 * lockres doesn't exist on this node 
-	 * if there is an MLE_BLOCK, return NO 
+	/*
+	 * lockres doesn't exist on this node
+	 * if there is an MLE_BLOCK, return NO
 	 * if there is an MLE_MASTER, return MAYBE
-	 * otherwise, add an MLE_BLOCK, return NO 
+	 * otherwise, add an MLE_BLOCK, return NO
 	 */
 	spin_lock(&dlm->master_lock);
 	found = dlm_find_mle(dlm, &tmpmle, name, namelen);
@@ -1268,7 +1280,7 @@
 			spin_unlock(&dlm->master_lock);
 			spin_unlock(&dlm->spinlock);
 
-			mle = (dlm_master_list_entry *)
+			mle = (struct dlm_master_list_entry *)
 				kmem_cache_alloc(dlm_mle_cache, GFP_KERNEL);
 			if (!mle) {
 				// bad bad bad... this sucks.
@@ -1276,7 +1288,7 @@
 				goto send_response;
 			}
 			spin_lock(&dlm->spinlock);
-			dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, 
+			dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL,
 					 name, namelen);
 			spin_unlock(&dlm->spinlock);
 			goto way_up_top;
@@ -1329,13 +1341,13 @@
  * can periodically run all locks owned by this node
  * and re-assert across the cluster...
  */
-int dlm_do_assert_master(dlm_ctxt *dlm, const char *lockname, 
+int dlm_do_assert_master(struct dlm_ctxt *dlm, const char *lockname,
 			 unsigned int namelen, void *nodemap,
 			 u32 flags)
 {
-	dlm_assert_master assert;
+	struct dlm_assert_master assert;
 	int to, tmpret;
-	dlm_node_iter iter;
+	struct dlm_node_iter iter;
 	int ret = 0;
 
 	BUG_ON(namelen > O2NM_MAX_NAME_LEN);
@@ -1353,7 +1365,7 @@
 		assert.flags = flags;
 
 		dlm_assert_master_to_net(&assert);
-		tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key, 
+		tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
 					    &assert, sizeof(assert), to, &r);
 		if (tmpret < 0) {
 			mlog(ML_ERROR, "assert_master returned %d!\n", tmpret);
@@ -1364,7 +1376,7 @@
 			/* a node died.  finish out the rest of the nodes. */
 			mlog(ML_ERROR, "link to %d went down!\n", to);
 			/* any nonzero status return will do */
-			ret = tmpret; 
+			ret = tmpret;
 		} else if (r < 0) {
 			/* ok, something horribly messed.  kill thyself. */
 			mlog(ML_ERROR,"during assert master of %.*s to %u, "
@@ -1388,10 +1400,10 @@
  */
 int dlm_assert_master_handler(o2net_msg *msg, u32 len, void *data)
 {
-	dlm_ctxt *dlm = data;
-	dlm_master_list_entry *mle = NULL;
-	dlm_assert_master *assert = (dlm_assert_master *)msg->buf;
-	dlm_lock_resource *res;
+	struct dlm_ctxt *dlm = data;
+	struct dlm_master_list_entry *mle = NULL;
+	struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
+	struct dlm_lock_resource *res;
 	char *name;
 	unsigned int namelen;
 	u32 flags;
@@ -1399,7 +1411,7 @@
 	if (!dlm_grab(dlm))
 		return 0;
 
-	dlm_assert_master_to_host(assert);	
+	dlm_assert_master_to_host(assert);
 	name = assert->name;
 	namelen = assert->namelen;
 	flags = assert->flags;
@@ -1433,7 +1445,7 @@
 				     "back off\n", assert->node_idx, bit);
 			} else {
 				mlog(ML_ERROR, "expected %u to be the master, "
-				     "but %u is asserting! (%.*s)\n", bit, 
+				     "but %u is asserting! (%.*s)\n", bit,
 				     assert->node_idx, namelen, name);
 				BUG();
 			}
@@ -1467,14 +1479,14 @@
 				}
 				mlog(ML_ERROR, "got assert_master from "
 				     "node %u, but %u is the owner! "
-				     "(%.*s)\n", assert->node_idx, 
+				     "(%.*s)\n", assert->node_idx,
 				     res->owner, namelen, name);
 				goto kill;
 			}
 			if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
 				mlog(ML_ERROR, "got assert from %u, but lock "
 				     "with no owner should be "
-				     "in-progress! (%.*s)\n", 
+				     "in-progress! (%.*s)\n",
 				     assert->node_idx,
 				     namelen, name);
 				goto kill;
@@ -1484,7 +1496,7 @@
 			if (assert->node_idx != mle->new_master) {
 				mlog(ML_ERROR, "got assert from %u, but "
 				     "new master is %u, and old master "
-				     "was %u (%.*s)\n", 
+				     "was %u (%.*s)\n",
 				     assert->node_idx, mle->new_master,
 				     mle->master, namelen, name);
 				goto kill;
@@ -1496,7 +1508,7 @@
 	}
 	spin_unlock(&dlm->spinlock);
 
-	// mlog(0, "woo!  got an assert_master from node %u!\n", 
+	// mlog(0, "woo!  got an assert_master from node %u!\n",
 	// 	     assert->node_idx);
 	if (mle) {
 		spin_lock(&mle->spinlock);
@@ -1515,9 +1527,9 @@
 		/* master is known, detach if not already detached */
 		dlm_mle_detach_hb_events(dlm, mle);
 		dlm_put_mle(mle);
-		/* the assert master message now balances the extra 
+		/* the assert master message now balances the extra
 		 * ref given by the master request message.
-		 * if this is the last put, it will be removed 
+		 * if this is the last put, it will be removed
 		 * from the list. */
 		dlm_put_mle(mle);
 	}
@@ -1537,10 +1549,11 @@
 	return -EINVAL;
 }
 
-int dlm_dispatch_assert_master(dlm_ctxt *dlm, dlm_lock_resource *res, 
+int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
+			       struct dlm_lock_resource *res,
 			       int ignore_higher, u8 request_from, u32 flags)
 {
-	dlm_work_item *item;
+	struct dlm_work_item *item;
 	item = kcalloc(1, sizeof(*item), GFP_KERNEL);
 	if (!item)
 		return -ENOMEM;
@@ -1554,7 +1567,7 @@
 	item->u.am.ignore_higher = ignore_higher;
 	item->u.am.request_from = request_from;
 	item->u.am.flags = flags;
-	
+
 	spin_lock(&dlm->work_lock);
 	list_add_tail(&item->list, &dlm->work_list);
 	spin_unlock(&dlm->work_lock);
@@ -1563,11 +1576,11 @@
 	return 0;
 }
 
-void dlm_assert_master_worker(dlm_work_item *item, void *data)
+void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
 {
-	dlm_ctxt *dlm = data;
+	struct dlm_ctxt *dlm = data;
 	int ret = 0;
-	dlm_lock_resource *res;
+	struct dlm_lock_resource *res;
 	unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
 	int ignore_higher;
 	int bit;
@@ -1586,13 +1599,13 @@
 
 	clear_bit(dlm->node_num, nodemap);
 	if (ignore_higher) {
-		/* if is this just to clear up mles for nodes below 
+		/* if this is just to clear up mles for nodes below
 		 * this node, do not send the message to the original
 		 * caller or any node number higher than this */
 		clear_bit(request_from, nodemap);
 		bit = dlm->node_num;
 		while (1) {
-			bit = find_next_bit(nodemap, O2NM_MAX_NODES, 
+			bit = find_next_bit(nodemap, O2NM_MAX_NODES,
 					    bit+1);
 		       	if (bit >= O2NM_MAX_NODES)
 				break;
@@ -1603,7 +1616,7 @@
 	/* this call now finishes out the nodemap
 	 * even if one or more nodes die */
 	ret = dlm_do_assert_master(dlm, res->lockname.name,
-				   res->lockname.len, 
+				   res->lockname.len,
 				   nodemap, flags);
 	if (ret < 0) {
 		/* no need to restart, we are done */
@@ -1621,30 +1634,31 @@
  */
 
 
-int dlm_migrate_lockres(dlm_ctxt *dlm, dlm_lock_resource *res, u8 target)
+int dlm_migrate_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+			u8 target)
 {
-	dlm_master_list_entry *mle = NULL;
-	dlm_master_list_entry *oldmle = NULL;
- 	dlm_migratable_lockres *mres = NULL;
+	struct dlm_master_list_entry *mle = NULL;
+	struct dlm_master_list_entry *oldmle = NULL;
+ 	struct dlm_migratable_lockres *mres = NULL;
 	int ret = -EINVAL;
 	const char *name;
 	unsigned int namelen;
 	int mle_added = 0;
 	struct list_head *queue, *iter;
 	int i;
-	dlm_lock *lock;
+	struct dlm_lock *lock;
 	int empty = 1;
 
 	if (!dlm_grab(dlm))
 		return -EINVAL;
-	
+
 	name = res->lockname.name;
 	namelen = res->lockname.len;
 
 	mlog(0, "migrating %.*s to %u\n", namelen, name, target);
 
-	/* 
-	 * ensure this lockres is a proper candidate for migration 
+	/*
+	 * ensure this lockres is a proper candidate for migration
 	 */
 	spin_lock(&res->spinlock);
 	if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
@@ -1661,13 +1675,13 @@
 	queue = &res->granted;
 	for (i=0; i<3; i++) {
 		list_for_each(iter, queue) {
-			lock = list_entry (iter, dlm_lock, list);
+			lock = list_entry (iter, struct dlm_lock, list);
 			empty = 0;
 			if (lock->ml.node == dlm->node_num) {
 				mlog(0, "found a lock owned by this node "
 				     "still on the %s queue!  will not "
 				     "migrate this lockres\n",
-				     i==0 ? "granted" : 
+				     i==0 ? "granted" :
 				     (i==1 ? "converting" : "blocked"));
 				spin_unlock(&res->spinlock);
 				ret = -ENOTEMPTY;
@@ -1692,13 +1706,13 @@
 	 */
 
 	ret = -ENOMEM;
-	mres = (dlm_migratable_lockres *) __get_free_page(GFP_KERNEL);
+	mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_KERNEL);
 	if (!mres) {
 		mlog_errno(ret);
 		goto leave;
 	}
 
-	mle = (dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
+	mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
 							 GFP_KERNEL);
 	if (!mle) {
 		ret = -ENOMEM;
@@ -1718,7 +1732,7 @@
 		target = dlm_pick_migration_target(dlm, res);
 	}
 	mlog(0, "node %u chosen for migration\n", target);
-	
+
 	if (target >= O2NM_MAX_NODES ||
 	    !test_bit(target, dlm->domain_map)) {
 		/* target chosen is not alive */
@@ -1732,12 +1746,12 @@
 
 	mlog(0, "continuing with target = %u\n", target);
 
-	/* 
+	/*
 	 * clear any existing master requests and
-	 * add the migration mle to the list 
-	 */	
+	 * add the migration mle to the list
+	 */
 	spin_lock(&dlm->master_lock);
-	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name, 
+	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
 				    namelen, target, dlm->node_num);
 	spin_unlock(&dlm->master_lock);
 	spin_unlock(&dlm->spinlock);
@@ -1753,7 +1767,7 @@
 	 */
 	dlm_mark_lockres_migrating(dlm, res);
 
-fail:	
+fail:
 	if (oldmle) {
 		/* master is known, detach if not already detached */
 		dlm_mle_detach_hb_events(dlm, oldmle);
@@ -1769,10 +1783,10 @@
 		}
 		goto leave;
 	}
-	
+
 	/*
 	 * at this point, we have a migration target, an mle
-	 * in the master list, and the MIGRATING flag set on 
+	 * in the master list, and the MIGRATING flag set on
 	 * the lockres
 	 */
 
@@ -1781,12 +1795,12 @@
 	 * otherwise the assert_master from the new
 	 * master will destroy this. */
 	dlm_get_mle(mle);
-	
+
 	/* notify new node and send all lock state */
 	/* call send_one_lockres with migration flag.
 	 * this serves as notice to the target node that a
 	 * migration is starting. */
-	ret = dlm_send_one_lockres(dlm, res, mres, target, 
+	ret = dlm_send_one_lockres(dlm, res, mres, target,
 				   DLM_MRES_MIGRATION);
 
 	if (ret < 0) {
@@ -1799,12 +1813,12 @@
 		goto leave;
 	}
 
-	/* at this point, the target sends a message to all nodes, 
+	/* at this point, the target sends a message to all nodes,
 	 * (using dlm_do_migrate_request).  this node is skipped since
 	 * we had to put an mle in the list to begin the process.  this
 	 * node now waits for target to do an assert master.  this node
 	 * will be the last one notified, ensuring that the migration
-	 * is complete everywhere.  if the target dies while this is 
+	 * is complete everywhere.  if the target dies while this is
 	 * going on, some nodes could potentially see the target as the
 	 * master, so it is important that my recovery finds the migration
 	 * mle and sets the master to UNKNOWN. */
@@ -1812,8 +1826,8 @@
 
 	/* wait for new node to assert master */
 	while (1) {
-		ret = wait_event_interruptible_timeout(mle->wq, 
-					(atomic_read(&mle->woken) == 1), 
+		ret = wait_event_interruptible_timeout(mle->wq,
+					(atomic_read(&mle->woken) == 1),
 					msecs_to_jiffies(5000));
 
 		if (ret >= 0) {
@@ -1840,7 +1854,7 @@
 	dlm_remove_nonlocal_locks(dlm, res);
 	spin_unlock(&res->spinlock);
 	wake_up(&res->wq);
-	
+
 	/* master is known, detach if not already detached */
 	dlm_mle_detach_hb_events(dlm, mle);
 	dlm_put_mle(mle);
@@ -1859,14 +1873,15 @@
 EXPORT_SYMBOL_GPL(dlm_migrate_lockres);
 
 
-static void dlm_mark_lockres_migrating(dlm_ctxt *dlm, dlm_lock_resource *res)
+static void dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
+				       struct dlm_lock_resource *res)
 {
-	/* need to set MIGRATING flag on lockres.  this is done by 
+	/* need to set MIGRATING flag on lockres.  this is done by
 	 * ensuring that all asts have been flushed for this lockres. */
 	spin_lock(&res->spinlock);
 	BUG_ON(res->migration_pending);
 	res->migration_pending = 1;
-	/* strategy is to reserve an extra ast then release 
+	/* strategy is to reserve an extra ast then release
 	 * it below, letting the release do all of the work */
 	__dlm_lockres_reserve_ast(res);
 	spin_unlock(&res->spinlock);
@@ -1875,19 +1890,19 @@
 	dlm_flush_lockres_asts(dlm, res);
 	dlm_lockres_release_ast(res);
 
-	/* if the extra ref we just put was the final one, this 
+	/* if the extra ref we just put was the final one, this
 	 * will pass thru immediately.  otherwise, we need to wait
-	 * for the last ast to finish. */	
+	 * for the last ast to finish. */
 	spin_lock(&res->spinlock);
 	__dlm_wait_on_lockres_flags_set(res, DLM_LOCK_RES_MIGRATING);
 	spin_unlock(&res->spinlock);
 
-	/* 
+	/*
 	 * at this point:
 	 *
 	 *   o the DLM_LOCK_RES_MIGRATING flag is set
 	 *   o there are no pending asts on this lockres
-	 *   o all processes trying to reserve an ast on this 
+	 *   o all processes trying to reserve an ast on this
 	 *     lockres must wait for the MIGRATING flag to clear
 	 */
 }
@@ -1895,12 +1910,13 @@
 /* last step in the migration process.
  * original master calls this to free all of the dlm_lock
  * structures that used to be for other nodes. */
-static void dlm_remove_nonlocal_locks(dlm_ctxt *dlm, dlm_lock_resource *res)
+static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
+				      struct dlm_lock_resource *res)
 {
 	struct list_head *iter, *iter2;
 	struct list_head *queue = &res->granted;
 	int i;
-	dlm_lock *lock;
+	struct dlm_lock *lock;
 
 	assert_spin_locked(&res->spinlock);
 
@@ -1908,7 +1924,7 @@
 
 	for (i=0; i<3; i++) {
 		list_for_each_safe(iter, iter2, queue) {
-			lock = list_entry (iter, dlm_lock, list);
+			lock = list_entry (iter, struct dlm_lock, list);
 			if (lock->ml.node != dlm->node_num) {
 				mlog(0, "putting lock for node %u\n",
 				     lock->ml.node);
@@ -1920,21 +1936,22 @@
 				list_del_init(&lock->list);
 				dlm_lock_put(lock);
 			}
-		}	
+		}
 		queue++;
 	}
 }
 
-/* for now this is not too intelligent.  we will 
- * need stats to make this do the right thing. 
+/* for now this is not too intelligent.  we will
+ * need stats to make this do the right thing.
  * this just finds the first lock on one of the
  * queues and uses that node as the target. */
-static u8 dlm_pick_migration_target(dlm_ctxt *dlm, dlm_lock_resource *res)
+static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
+				    struct dlm_lock_resource *res)
 {
 	int i;
 	struct list_head *queue = &res->granted;
 	struct list_head *iter;
-	dlm_lock *lock;
+	struct dlm_lock *lock;
 	int nodenum;
 
 	assert_spin_locked(&dlm->spinlock);
@@ -1944,7 +1961,7 @@
 		list_for_each(iter, queue) {
 			/* up to the caller to make sure this node
 			 * is alive */
-			lock = list_entry (iter, dlm_lock, list);
+			lock = list_entry (iter, struct dlm_lock, list);
 			if (lock->ml.node != dlm->node_num) {
 				spin_unlock(&res->spinlock);
 				return lock->ml.node;
@@ -1958,7 +1975,7 @@
 	/* ok now we're getting desperate.  pick anyone alive. */
 	nodenum = -1;
 	while (1) {
-		nodenum = find_next_bit(dlm->domain_map, 
+		nodenum = find_next_bit(dlm->domain_map,
 					O2NM_MAX_NODES, nodenum+1);
 		mlog(0, "found %d in domain map\n", nodenum);
 		if (nodenum >= O2NM_MAX_NODES)
@@ -1977,13 +1994,13 @@
 
 /* this is called by the new master once all lockres
  * data has been received */
-int dlm_do_migrate_request(dlm_ctxt *dlm, dlm_lock_resource *res, 
-			   u8 master, u8 new_master, dlm_node_iter *iter)
+int dlm_do_migrate_request(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+			   u8 master, u8 new_master, struct dlm_node_iter *iter)
 {
-	dlm_migrate_request migrate;
+	struct dlm_migrate_request migrate;
 	int ret, status = 0;
 	int nodenum;
-	
+
 	memset(&migrate, 0, sizeof(migrate));
 	migrate.namelen = res->lockname.len;
 	memcpy(migrate.name, res->lockname.name, migrate.namelen);
@@ -2000,13 +2017,13 @@
 		    nodenum == new_master)
 			continue;
 
-		ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key, 
+		ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
 					 &migrate, sizeof(migrate), nodenum,
 					 &status);
 		if (ret < 0)
 			mlog_errno(ret);
 		else if (status < 0) {
-			mlog(0, "migrate request (node %u) returned %d!\n", 
+			mlog(0, "migrate request (node %u) returned %d!\n",
 			     nodenum, status);
 			ret = status;
 		}
@@ -2020,19 +2037,19 @@
 }
 
 
-/* if there is an existing mle for this lockres, we now know who the master is. 
+/* if there is an existing mle for this lockres, we now know who the master is.
  * (the one who sent us *this* message) we can clear it up right away.
- * since the process that put the mle on the list still has a reference to it, 
+ * since the process that put the mle on the list still has a reference to it,
  * we can unhash it now, set the master and wake the process.  as a result,
- * we will have no mle in the list to start with.  now we can add an mle for 
- * the migration and this should be the only one found for those scanning the 
+ * we will have no mle in the list to start with.  now we can add an mle for
+ * the migration and this should be the only one found for those scanning the
  * list.  */
 int dlm_migrate_request_handler(o2net_msg *msg, u32 len, void *data)
 {
-	dlm_ctxt *dlm = data;
-	dlm_lock_resource *res = NULL;
-	dlm_migrate_request *migrate = (dlm_migrate_request *) msg->buf;
-	dlm_master_list_entry *mle = NULL, *oldmle = NULL;
+	struct dlm_ctxt *dlm = data;
+	struct dlm_lock_resource *res = NULL;
+	struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
+	struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
 	const char *name;
 	unsigned int namelen;
 	int ret = 0;
@@ -2045,7 +2062,7 @@
 	namelen = migrate->namelen;
 
 	/* preallocate.. if this fails, abort */
-	mle = (dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
+	mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
 							 GFP_KERNEL);
 
 	if (!mle) {
@@ -2076,9 +2093,9 @@
 	}
 
 	/* ignore status.  only nonzero status would BUG. */
-	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, 
-				    name, namelen, 
-				    migrate->new_master, 
+	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
+				    name, namelen,
+				    migrate->new_master,
 				    migrate->master);
 
 unlock:
@@ -2098,17 +2115,17 @@
 	return ret;
 }
 
-/* must be holding dlm->spinlock and dlm->master_lock 
+/* must be holding dlm->spinlock and dlm->master_lock
  * when adding a migration mle, we can clear any other mles
  * in the master list because we know with certainty that
- * the master is "master".  so we remove any old mle from 
+ * the master is "master".  so we remove any old mle from
 * the list after setting its master field, and then add
  * the new migration mle.  this way we can hold with the rule
  * of having only one mle for a given lock name at all times. */
-static int dlm_add_migration_mle(dlm_ctxt *dlm, 
-				 dlm_lock_resource *res, 
-				 dlm_master_list_entry *mle, 
-				 dlm_master_list_entry **oldmle, 
+static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
+				 struct dlm_lock_resource *res,
+				 struct dlm_master_list_entry *mle,
+				 struct dlm_master_list_entry **oldmle,
 				 const char *name, unsigned int namelen,
 				 u8 new_master, u8 master)
 {
@@ -2125,7 +2142,7 @@
 	/* caller is responsible for any ref taken here on oldmle */
 	found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
 	if (found) {
-		dlm_master_list_entry *tmp = *oldmle;
+		struct dlm_master_list_entry *tmp = *oldmle;
 		spin_lock(&tmp->spinlock);
 		if (tmp->type == DLM_MLE_MIGRATION) {
 			if (master == dlm->node_num) {
@@ -2141,7 +2158,7 @@
 				     "master=%u new_master=%u // "
 				     "lockres=%.*s\n",
 				     tmp->master, tmp->new_master,
-				     master, new_master, 
+				     master, new_master,
 				     namelen, name);
 				BUG();
 			}
@@ -2169,11 +2186,11 @@
 }
 
 
-void dlm_clean_master_list(dlm_ctxt *dlm, u8 dead_node)
+void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
 {
 	struct list_head *iter, *iter2;
-	dlm_master_list_entry *mle;
-	dlm_lock_resource *res;
+	struct dlm_master_list_entry *mle;
+	struct dlm_lock_resource *res;
 
 	mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node);
 top:
@@ -2182,8 +2199,8 @@
 	/* clean the master list */
 	spin_lock(&dlm->master_lock);
 	list_for_each_safe(iter, iter2, &dlm->master_list) {
-		mle = list_entry(iter, dlm_master_list_entry, list);
-		
+		mle = list_entry(iter, struct dlm_master_list_entry, list);
+
 		BUG_ON(mle->type != DLM_MLE_BLOCK &&
 		       mle->type != DLM_MLE_MASTER &&
 		       mle->type != DLM_MLE_MIGRATION);
@@ -2209,7 +2226,7 @@
 				spin_unlock(&mle->spinlock);
 			} else {
 				/* must drop the refcount by one since the
-				 * assert_master will never arrive.  this 
+				 * assert_master will never arrive.  this
 				 * may result in the mle being unlinked and
 				 * freed, but there may still be a process
 				 * waiting in the dlmlock path which is fine. */
@@ -2230,9 +2247,9 @@
 		 * becomes UNKNOWN if *either* the original or
 		 * the new master dies.  all UNKNOWN lockreses
 		 * are sent to whichever node becomes the recovery
-		 * master.  the new master is responsible for 
-		 * determining if there is still a master for 
-		 * this lockres, or if he needs to take over 
+		 * master.  the new master is responsible for
+		 * determining if there is still a master for
+		 * this lockres, or if he needs to take over
 		 * mastery.  either way, this node should expect
 		 * another message to resolve this. */
 		if (mle->master != dead_node &&
@@ -2242,32 +2259,32 @@
 		/* if we have reached this point, this mle needs to
 		 * be removed from the list and freed. */
 
-		/* remove from the list early.  NOTE: unlinking 
+		/* remove from the list early.  NOTE: unlinking
 		 * list_head while in list_for_each_safe */
 		spin_lock(&mle->spinlock);
 		list_del_init(&mle->list);
 		atomic_set(&mle->woken, 1);
 		spin_unlock(&mle->spinlock);
 		wake_up(&mle->wq);
-				
+
 		mlog(0, "node %u died during migration from "
-		     "%u to %u!\n", dead_node, 
+		     "%u to %u!\n", dead_node,
 		     mle->master, mle->new_master);
 		/* if there is a lockres associated with this
 	 	 * mle, find it and set its owner to UNKNOWN */
-		res = __dlm_lookup_lockres(dlm, mle->u.name.name, 
+		res = __dlm_lookup_lockres(dlm, mle->u.name.name,
 					mle->u.name.len);
 		if (res) {
-			/* unfortunately if we hit this rare case, our 
+			/* unfortunately if we hit this rare case, our
 		 	 * lock ordering is messed.  we need to drop
 		 	 * the master lock so that we can take the
 		  	 * lockres lock, meaning that we will have to
 			 * restart from the head of list. */
 			spin_unlock(&dlm->master_lock);
-		
+
 			/* move lockres onto recovery list */
 			spin_lock(&res->spinlock);
-			dlm_set_lockres_owner(dlm, res, 
+			dlm_set_lockres_owner(dlm, res,
 				      	DLM_LOCK_RES_OWNER_UNKNOWN);
 			dlm_move_lockres_to_recovery_list(dlm, res);
 			spin_unlock(&res->spinlock);
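
This is the awkward case the comment calls out: dlm->master_lock is held
but res->spinlock is needed, and the established ordering forbids taking
them in that sequence, so the code drops the master lock and restarts the
scan from the head of the list.  The control flow in miniature
(illustrative; the real code also holds references so the item cannot
vanish while unlocked):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct example_item {
            struct list_head list;
            int needs_fixup;
    };

    static void example_clean(spinlock_t *outer, spinlock_t *inner,
                              struct list_head *head)
    {
            struct example_item *it;
            struct list_head *iter, *tmp;

    restart:
            spin_lock(outer);
            list_for_each_safe(iter, tmp, head) {
                    it = list_entry(iter, struct example_item, list);
                    if (!it->needs_fixup)
                            continue;
                    /* cannot take 'inner' while 'outer' is held: drop,
                     * fix up, then rescan from the top since the list
                     * may have changed while unlocked */
                    spin_unlock(outer);
                    spin_lock(inner);
                    it->needs_fixup = 0;
                    spin_unlock(inner);
                    goto restart;
            }
            spin_unlock(outer);
    }
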
@@ -2281,7 +2298,7 @@
 			/* restart */
 			goto top;
 		}
-		
+
 		/* this may be the last reference */
 		__dlm_put_mle(mle);
 	}
@@ -2289,9 +2306,10 @@
 }
 
 
-int dlm_finish_migration(dlm_ctxt *dlm, dlm_lock_resource *res, u8 old_master)
+int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+			 u8 old_master)
 {
-	dlm_node_iter iter;
+	struct dlm_node_iter iter;
 	int ret = 0;
 
 	spin_lock(&dlm->spinlock);
@@ -2311,7 +2329,7 @@
 	mlog(0, "doing assert master to all except the original node\n");
 	/* this call now finishes out the nodemap
 	 * even if one or more nodes die */
-	ret = dlm_do_assert_master(dlm, res->lockname.name, 
+	ret = dlm_do_assert_master(dlm, res->lockname.name,
 				   res->lockname.len, iter.node_map,
 				   DLM_ASSERT_MASTER_FINISH_MIGRATION);
 	if (ret < 0) {
@@ -2353,7 +2371,7 @@
  * this should be called only after waiting on the lockres
  * with dlm_wait_on_lockres, and while still holding the
  * spinlock after the call. */
-void __dlm_lockres_reserve_ast(dlm_lock_resource *res)
+void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
 {
 	assert_spin_locked(&res->spinlock);
 	BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
@@ -2361,20 +2379,20 @@
 	atomic_inc(&res->asts_reserved);
 }
 
-/* 
- * used to drop the reserved ast, either because it went unused, 
+/*
+ * used to drop the reserved ast, either because it went unused,
  * or because the ast/bast was actually called.
  *
- * also, if there is a pending migration on this lockres, 
- * and this was the last pending ast on the lockres, 
- * atomically set the MIGRATING flag before we drop the lock.  
- * this is how we ensure that migration can proceed with no 
- * asts in progress.  note that it is ok if the state of the 
+ * also, if there is a pending migration on this lockres,
+ * and this was the last pending ast on the lockres,
+ * atomically set the MIGRATING flag before we drop the lock.
+ * this is how we ensure that migration can proceed with no
+ * asts in progress.  note that it is ok if the state of the
  * queues is such that a lock should be granted in the future
  * or that a bast should be fired, because the new master will
  * shuffle the lists on this lockres as soon as it is migrated.
  */
-void dlm_lockres_release_ast(dlm_lock_resource *res)
+void dlm_lockres_release_ast(struct dlm_lock_resource *res)
 {
 	if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
 		return;
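
dlm_lockres_release_ast() opens with atomic_dec_and_lock(): the decrement
is lock-free except on the final put, where the spinlock is acquired
atomically with the transition to zero -- which is what lets the MIGRATING
flag be set with no asts in flight.  The primitive in isolation:

    #include <linux/spinlock.h>
    #include <asm/atomic.h>

    static void example_put(atomic_t *count, spinlock_t *lock)
    {
            /* returns nonzero, with 'lock' held, only if this
             * decrement brought 'count' to zero */
            if (!atomic_dec_and_lock(count, lock))
                    return;

            /* final reference: state changed here is atomic with
             * respect to everyone else taking 'lock' */
            spin_unlock(lock);
    }
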

Modified: trunk/fs/ocfs2/dlm/dlmrecovery.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmrecovery.c	2005-06-29 22:47:38 UTC (rev 2447)
+++ trunk/fs/ocfs2/dlm/dlmrecovery.c	2005-06-30 00:30:29 UTC (rev 2448)
@@ -52,48 +52,54 @@
 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
 #include "cluster/masklog.h"
 
-static void dlm_do_local_recovery_cleanup(dlm_ctxt *dlm, u8 dead_node);
+static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);
 
 static int dlm_recovery_thread(void *data);
-void dlm_complete_recovery_thread(dlm_ctxt *dlm);
-int dlm_launch_recovery_thread(dlm_ctxt *dlm);
-void dlm_kick_recovery_thread(dlm_ctxt *dlm);
-int dlm_do_recovery(dlm_ctxt *dlm);
+void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
+int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
+void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
+int dlm_do_recovery(struct dlm_ctxt *dlm);
 
-int dlm_pick_recovery_master(dlm_ctxt *dlm);
-static int dlm_remaster_locks(dlm_ctxt *dlm, u8 dead_node);
-int dlm_init_recovery_area(dlm_ctxt *dlm, u8 dead_node);
-int dlm_request_all_locks(dlm_ctxt *dlm, u8 request_from, u8 dead_node);
-void dlm_destroy_recovery_area(dlm_ctxt *dlm, u8 dead_node);
+int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
+static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
+int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
+int dlm_request_all_locks(struct dlm_ctxt *dlm,
+			  u8 request_from, u8 dead_node);
+void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
 
-static inline int dlm_num_locks_in_lockres(dlm_lock_resource *res);
-static void dlm_init_migratable_lockres(dlm_migratable_lockres *mres,
-					const char *lockname, int namelen, 
-					int total_locks, u64 cookie, 
+static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
+static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
+					const char *lockname, int namelen,
+					int total_locks, u64 cookie,
 					u8 flags, u8 master);
-static int dlm_send_mig_lockres_msg(dlm_ctxt *dlm, 
-				    dlm_migratable_lockres *mres, 
+static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
+				    struct dlm_migratable_lockres *mres,
 				    u8 send_to,
-				    dlm_lock_resource *res,
+				    struct dlm_lock_resource *res,
 				    int total_locks);
-static int dlm_lockres_master_requery(dlm_ctxt *dlm, dlm_lock_resource *res, 
+static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
+				      struct dlm_lock_resource *res,
 				      u8 *real_master);
-static int dlm_process_recovery_data(dlm_ctxt *dlm, dlm_lock_resource *res,
-				     dlm_migratable_lockres *mres);
-static int dlm_do_master_requery(dlm_ctxt *dlm, dlm_lock_resource *res, 
+static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
+				     struct dlm_lock_resource *res,
+				     struct dlm_migratable_lockres *mres);
+static int dlm_do_master_requery(struct dlm_ctxt *dlm,
+				 struct dlm_lock_resource *res,
 				 u8 nodenum, u8 *real_master);
-static int dlm_send_finalize_reco_message(dlm_ctxt *dlm);
-static int dlm_send_all_done_msg(dlm_ctxt *dlm, u8 dead_node, u8 send_to);
-static int dlm_send_begin_reco_message(dlm_ctxt *dlm, u8 dead_node);
-static void dlm_move_reco_locks_to_list(dlm_ctxt *dlm, struct list_head *list,
-				       	u8 dead_node);
-static void dlm_finish_local_lockres_recovery(dlm_ctxt *dlm, u8 dead_node,
-					      u8 new_master);
+static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
+static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
+				 u8 dead_node, u8 send_to);
+static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
+static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
+					struct list_head *list, u8 dead_node);
+static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
+					      u8 dead_node, u8 new_master);
 static void dlm_reco_ast(void *astdata);
 static void dlm_reco_bast(void *astdata, int blocked_type);
-static void dlm_reco_unlock_ast(void *astdata, dlm_status st);
-static void dlm_request_all_locks_worker(dlm_work_item *item, void *data);
-static void dlm_mig_lockres_worker(dlm_work_item *item, void *data);
+static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
+static void dlm_request_all_locks_worker(struct dlm_work_item *item,
+					 void *data);
+static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
 
 static u64 dlm_get_next_mig_cookie(void);
 
@@ -114,7 +120,7 @@
 	return c;
 }
 
-static inline void dlm_reset_recovery(dlm_ctxt *dlm)
+static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
 {
 	spin_lock(&dlm->spinlock);
 	clear_bit(dlm->reco.dead_node, dlm->recovery_map);
@@ -126,10 +132,10 @@
 /* Worker function used during recovery. */
 void dlm_dispatch_work(void *data)
 {
-	dlm_ctxt *dlm = (dlm_ctxt *)data;
+	struct dlm_ctxt *dlm = (struct dlm_ctxt *)data;
 	LIST_HEAD(tmp_list);
 	struct list_head *iter, *iter2;
-	dlm_work_item *item;
+	struct dlm_work_item *item;
 	dlm_workfunc_t *workfunc;
 
 	spin_lock(&dlm->work_lock);
@@ -137,14 +143,14 @@
 	spin_unlock(&dlm->work_lock);
 
 	list_for_each_safe(iter, iter2, &tmp_list) {
-		item = list_entry(iter, dlm_work_item, list);
+		item = list_entry(iter, struct dlm_work_item, list);
 		workfunc = item->func;
 		list_del_init(&item->list);
 
 		/* already have ref on dlm to avoid having
 		 * it disappear.  just double-check. */
 		BUG_ON(item->dlm != dlm);
-	
+
 		/* this is allowed to sleep and
 		 * call network stuff */
 		workfunc(item, item->data);
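
dlm_dispatch_work() is the usual steal-then-run queue: pending items move
to a private list under work_lock (presumably via list_splice_init() in
the lines the hunk elides), and the callbacks then run unlocked so they
may sleep and use the network.  The skeleton with illustrative names:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct example_item {
            struct list_head list;
            void (*func)(struct example_item *item);
    };

    static void example_dispatch(spinlock_t *lock, struct list_head *pending)
    {
            LIST_HEAD(tmp_list);
            struct list_head *iter, *iter2;
            struct example_item *item;

            /* hold the lock only long enough to steal the queue */
            spin_lock(lock);
            list_splice_init(pending, &tmp_list);
            spin_unlock(lock);

            /* run unlocked: callbacks are allowed to sleep */
            list_for_each_safe(iter, iter2, &tmp_list) {
                    item = list_entry(iter, struct example_item, list);
                    list_del_init(&item->list);
                    item->func(item);
            }
    }
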
@@ -158,23 +164,23 @@
  * RECOVERY THREAD
  */
 
-void dlm_kick_recovery_thread(dlm_ctxt *dlm)
+void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
 {
-	/* wake the recovery thread 
+	/* wake the recovery thread
 	 * this will wake the reco thread in one of three places
 	 * 1) sleeping with no recovery happening
-	 * 2) sleeping with recovery mastered elsewhere 
+	 * 2) sleeping with recovery mastered elsewhere
 	 * 3) recovery mastered here, waiting on reco data */
 
 	wake_up(&dlm->dlm_reco_thread_wq);
 }
 
 /* Launch the recovery thread */
-int dlm_launch_recovery_thread(dlm_ctxt *dlm)
+int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
 {
 	mlog(0, "starting dlm recovery thread...\n");
 
-	dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm, 
+	dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
 						"dlm_reco_thread");
 	if (IS_ERR(dlm->dlm_reco_thread_task)) {
 		mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
@@ -185,7 +191,7 @@
 	return 0;
 }
 
-void dlm_complete_recovery_thread(dlm_ctxt *dlm)
+void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
 {
 	if (dlm->dlm_reco_thread_task) {
 		mlog(0, "waiting for dlm recovery thread to exit\n");
@@ -196,7 +202,7 @@
 
 
 
-/* 
+/*
  * this is lame, but here's how recovery works...
  * 1) all recovery threads cluster wide will work on recovering
  *    ONE node at a time
@@ -209,11 +215,11 @@
  *    one lock at a time, forcing each node to communicate back
  *    before continuing
  * 6) each secondary lock queue responds with the full known lock info
+ * 7) once the new master has run all its locks, it sends an ALLDONE!
+ * 7) once the new master has run all its locks, it sends a ALLDONE!
  *    message to everyone
  * 8) upon receiving this message, the secondary queue node unlocks
  *    and responds to the ALLDONE
- * 9) once the new master gets responses from everyone, he unlocks 
+ * 9) once the new master gets responses from everyone, he unlocks
  *    everything and recovery for this dead node is done
  *10) go back to 2) while there are still dead nodes
  *
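
Steps 2 and 10 of the list above amount to draining the recovery bitmap
one dead node at a time, which is how dlm_do_recovery() below picks its
target with find_next_bit().  A condensed sketch of that outer loop, with
steps 3-9 abbreviated to a comment:

    /* recover dead nodes one at a time until the bitmap drains */
    static void example_recover_all(struct dlm_ctxt *dlm)
    {
            int dead;

            while (1) {
                    spin_lock(&dlm->spinlock);
                    dead = find_next_bit(dlm->recovery_map,
                                         O2NM_MAX_NODES, 0);
                    spin_unlock(&dlm->spinlock);
                    if (dead >= O2NM_MAX_NODES)
                            break;  /* step 10: nothing left to recover */

                    /* steps 3-9: elect or act as the recovery master for
                     * 'dead', collect lock state from each live node,
                     * then send/answer the ALLDONE message */
            }
    }
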
@@ -225,7 +231,7 @@
 static int dlm_recovery_thread(void *data)
 {
 	int status;
-	dlm_ctxt *dlm = data;
+	struct dlm_ctxt *dlm = data;
 	unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);
 
 	mlog(0, "dlm thread running for %s...\n", dlm->name);
@@ -249,14 +255,14 @@
 	mlog(0, "quitting DLM recovery thread\n");
 	return 0;
 }
-	
+
 /* callers of the top-level api calls (dlmlock/dlmunlock) should
- * block on the dlm->reco.event when recovery is in progress.  
+ * block on the dlm->reco.event when recovery is in progress.
  * the dlm recovery thread will set this state when it begins
  * recovering a dead node (as the new master or not) and clear
  * the state and wake as soon as all affected lock resources have
  * been marked with the RECOVERY flag */
-static int dlm_in_recovery(dlm_ctxt *dlm)
+static int dlm_in_recovery(struct dlm_ctxt *dlm)
 {
 	int in_recovery;
 	spin_lock(&dlm->spinlock);
@@ -266,12 +272,12 @@
 }
 
 
-void dlm_wait_for_recovery(dlm_ctxt *dlm)
+void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
 {
 	wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
 }
 
-static void dlm_begin_recovery(dlm_ctxt *dlm)
+static void dlm_begin_recovery(struct dlm_ctxt *dlm)
 {
 	spin_lock(&dlm->spinlock);
 	BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
@@ -279,7 +285,7 @@
 	spin_unlock(&dlm->spinlock);
 }
 
-static void dlm_end_recovery(dlm_ctxt *dlm)
+static void dlm_end_recovery(struct dlm_ctxt *dlm)
 {
 	spin_lock(&dlm->spinlock);
 	BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
@@ -288,7 +294,7 @@
 	wake_up(&dlm->reco.event);
 }
 
-int dlm_do_recovery(dlm_ctxt *dlm)
+int dlm_do_recovery(struct dlm_ctxt *dlm)
 {
 	int status = 0;
 
@@ -310,7 +316,7 @@
 		bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES+1, 0);
 		if (bit >= O2NM_MAX_NODES || bit < 0)
 			dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
-		else 
+		else
 			dlm->reco.dead_node = bit;
 	} else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
 		/* BUG? */
@@ -345,8 +351,8 @@
 		}
 		mlog(0, "another node will master this recovery session.\n");
 	}
-	mlog(0, "dlm=%s, new_master=%u, this node=%u, dead_node=%u\n", 
-	     dlm->name, dlm->reco.new_master, 
+	mlog(0, "dlm=%s, new_master=%u, this node=%u, dead_node=%u\n",
+	     dlm->name, dlm->reco.new_master,
 	     dlm->node_num, dlm->reco.dead_node);
 
 	/* it is safe to start everything back up here
@@ -358,7 +364,7 @@
 	return 0;
 
 master_here:
-	mlog(0, "mastering recovery of %s:%u here(this=%u)!\n", 
+	mlog(0, "mastering recovery of %s:%u here(this=%u)!\n",
 	     dlm->name, dlm->reco.dead_node, dlm->node_num);
 
 	status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
@@ -375,22 +381,15 @@
 	return -EAGAIN;
 }
 
-static int dlm_remaster_locks(dlm_ctxt *dlm, u8 dead_node)
+static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
 {
 	int status = 0;
-	dlm_reco_node_data *ndata;
+	struct dlm_reco_node_data *ndata;
 	struct list_head *iter;
 	int all_nodes_done;
 	int destroy = 0;
 	int pass = 0;
 
-/* +- if this node is the new master, init the temp recovery area */
-/* |- poll each live node for lock state */
-/* |- collect the data from each node until node says it's done, or dead */
-/* +--- if node died, throw away temp recovery area, keep new_master and dead_node, goto "select a target" */
-/* |- apply all temp area changes to real lock */
-/* +- send ALL DONE message to each node */
-
 	status = dlm_init_recovery_area(dlm, dead_node);
 	if (status < 0)
 		goto leave;
@@ -398,11 +397,11 @@
 	/* safe to access the node data list without a lock, since this
 	 * process is the only one to change the list */
 	list_for_each(iter, &dlm->reco.node_data) {
-		ndata = list_entry (iter, dlm_reco_node_data, list);
+		ndata = list_entry (iter, struct dlm_reco_node_data, list);
 		BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
 		ndata->state = DLM_RECO_NODE_DATA_REQUESTING;
 
-		mlog(0, "requesting lock info from node %u\n", 
+		mlog(0, "requesting lock info from node %u\n",
 		     ndata->node_num);
 
 		if (ndata->node_num == dlm->node_num) {
@@ -448,7 +447,7 @@
 				break;
 		}
 	}
-		
+
 	mlog(0, "done requesting all lock info\n");
 
 	/* nodes should be sending reco data now
@@ -460,7 +459,7 @@
 		all_nodes_done = 1;
 		spin_lock(&dlm_reco_state_lock);
 		list_for_each(iter, &dlm->reco.node_data) {
-			ndata = list_entry (iter, dlm_reco_node_data, list);
+			ndata = list_entry (iter, struct dlm_reco_node_data, list);
 
 			mlog(0, "checking recovery state of node %u\n",
 			     ndata->node_num);
@@ -499,8 +498,8 @@
 		if (all_nodes_done) {
 			int ret;
 
-			/* all nodes are now in DLM_RECO_NODE_DATA_DONE state 
-	 		 * just send a finalize message to everyone and 
+			/* all nodes are now in DLM_RECO_NODE_DATA_DONE state
+	 		 * just send a finalize message to everyone and
 	 		 * clean up */
 			mlog(0, "all nodes are done! send finalize\n");
 			ret = dlm_send_finalize_reco_message(dlm);
@@ -514,8 +513,8 @@
 			mlog(0, "should be done with recovery!\n");
 
 			mlog(0, "finishing recovery of %s at %lu, "
-			     "dead=%u, this=%u, new=%u\n", dlm->name, 
-			     jiffies, dlm->reco.dead_node, 
+			     "dead=%u, this=%u, new=%u\n", dlm->name,
+			     jiffies, dlm->reco.dead_node,
 			     dlm->node_num, dlm->reco.new_master);
 			destroy = 1;
 			status = ret;
@@ -525,8 +524,8 @@
 		}
 		/* wait to be signalled, with periodic timeout
 		 * to check for node death */
-		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq, 
-					 kthread_should_stop(), 
+		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
+					 kthread_should_stop(),
 					 msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));
 
 	}
@@ -539,17 +538,17 @@
 	return status;
 }
 
-int dlm_init_recovery_area(dlm_ctxt *dlm, u8 dead_node)
+int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
 {
 	int num=0;
-	dlm_reco_node_data *ndata;
+	struct dlm_reco_node_data *ndata;
 
 	spin_lock(&dlm->spinlock);
 	memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
 	/* nodes can only be removed (by dying) after dropping
 	 * this lock, and death will be trapped later, so this should do */
 	spin_unlock(&dlm->spinlock);
-	
+
 	while (1) {
 		num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num);
 		if (num >= O2NM_MAX_NODES) {
@@ -573,10 +572,10 @@
 	return 0;
 }
 
-void dlm_destroy_recovery_area(dlm_ctxt *dlm, u8 dead_node)
+void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
 {
 	struct list_head *iter, *iter2;
-	dlm_reco_node_data *ndata;
+	struct dlm_reco_node_data *ndata;
 	LIST_HEAD(tmplist);
 
 	spin_lock(&dlm_reco_state_lock);
@@ -584,16 +583,16 @@
 	spin_unlock(&dlm_reco_state_lock);
 
 	list_for_each_safe(iter, iter2, &tmplist) {
-		ndata = list_entry (iter, dlm_reco_node_data, list);
+		ndata = list_entry (iter, struct dlm_reco_node_data, list);
 		list_del_init(&ndata->list);
 		kfree(ndata);
 	}
 }
 
-int dlm_request_all_locks(dlm_ctxt *dlm, u8 request_from, u8 dead_node)
+int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from, u8 dead_node)
 {
-	dlm_lock_request lr;
-	dlm_status ret;
+	struct dlm_lock_request lr;
+	enum dlm_status ret;
 
 	mlog(0, "\n");
 
@@ -608,12 +607,12 @@
 	// send message
 	ret = DLM_NOLOCKMGR;
 	dlm_lock_request_to_net(&lr);
-	ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key, 
+	ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
 				 &lr, sizeof(lr), request_from, NULL);
 	if (ret < 0)
 		mlog_errno(ret);
 
-	// return from here, then 
+	// return from here, then
 	// sleep until all received or error
 	return ret;
 
@@ -621,11 +620,11 @@
 
 int dlm_request_all_locks_handler(o2net_msg *msg, u32 len, void *data)
 {
-	dlm_ctxt *dlm = data;
-	dlm_lock_request *lr = (dlm_lock_request *)msg->buf;
+	struct dlm_ctxt *dlm = data;
+	struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
 	char *buf = NULL;
-	dlm_work_item *item = NULL;
-	
+	struct dlm_work_item *item = NULL;
+
 	if (!dlm_grab(dlm))
 		return -EINVAL;
 
@@ -643,7 +642,7 @@
 	if (!buf) {
 		kfree(item);
 		dlm_put(dlm);
-		return -ENOMEM;	
+		return -ENOMEM;
 	}
 
 	/* queue up work for dlm_request_all_locks_worker */
@@ -660,11 +659,11 @@
 	return 0;
 }
 
-static void dlm_request_all_locks_worker(dlm_work_item *item, void *data)
+static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
 {
-	dlm_migratable_lockres *mres;
-	dlm_lock_resource *res;
-	dlm_ctxt *dlm;
+	struct dlm_migratable_lockres *mres;
+	struct dlm_lock_resource *res;
+	struct dlm_ctxt *dlm;
 	LIST_HEAD(resources);
 	struct list_head *iter;
 	int ret;
@@ -675,21 +674,21 @@
 	reco_master = item->u.ral.reco_master;
 	BUG_ON(dead_node != dlm->reco.dead_node);
 	BUG_ON(reco_master != dlm->reco.new_master);
-	
-	mres = (dlm_migratable_lockres *)data;
 
+	mres = (struct dlm_migratable_lockres *)data;
+
 	/* lock resources should have already been moved to the
  	 * dlm->reco.resources list.  now move items from that list
- 	 * to a temp list if the dead owner matches.  note that the 
-	 * whole cluster recovers only one node at a time, so we 
-	 * can safely move UNKNOWN lock resources for each recovery 
+ 	 * to a temp list if the dead owner matches.  note that the
+	 * whole cluster recovers only one node at a time, so we
+	 * can safely move UNKNOWN lock resources for each recovery
 	 * session. */
 	dlm_move_reco_locks_to_list(dlm, &resources, dead_node);
 
 	/* now we can begin blasting lockreses without the dlm lock */
 	list_for_each(iter, &resources) {
-		res = list_entry (iter, dlm_lock_resource, recovering);
-		ret = dlm_send_one_lockres(dlm, res, mres, reco_master, 
+		res = list_entry (iter, struct dlm_lock_resource, recovering);
+		ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
 				   	DLM_MRES_RECOVERY);
 		if (ret < 0)
 			mlog_errno(ret);
@@ -708,20 +707,20 @@
 }
 
 
-static int dlm_send_all_done_msg(dlm_ctxt *dlm, u8 dead_node, u8 send_to)
+static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
 {
-	int ret, tmpret;	
-	dlm_reco_data_done done_msg;
+	int ret, tmpret;
+	struct dlm_reco_data_done done_msg;
 
 	memset(&done_msg, 0, sizeof(done_msg));
 	done_msg.node_idx = dlm->node_num;
 	done_msg.dead_node = dead_node;
 	mlog(0, "sending DATA DONE message to %u, "
-	     "my node=%u, dead node=%u\n", send_to, done_msg.node_idx, 
+	     "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
 	     done_msg.dead_node);
 	dlm_reco_data_done_to_net(&done_msg);
-	
-	ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg, 
+
+	ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
 				 sizeof(done_msg), send_to, &tmpret);
 	if (ret >= 0)
 		ret = tmpret;
@@ -731,24 +730,24 @@
 
 int dlm_reco_data_done_handler(o2net_msg *msg, u32 len, void *data)
 {
-	dlm_ctxt *dlm = data;
-	dlm_reco_data_done *done = (dlm_reco_data_done *)msg->buf;
+	struct dlm_ctxt *dlm = data;
+	struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
 	struct list_head *iter;
-	dlm_reco_node_data *ndata = NULL;
+	struct dlm_reco_node_data *ndata = NULL;
 	int ret = -EINVAL;
-	
+
 	if (!dlm_grab(dlm))
 		return -EINVAL;
 
 	dlm_reco_data_done_to_host(done);
 	mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
-	     "node_idx=%u, this node=%u\n", done->dead_node, 
+	     "node_idx=%u, this node=%u\n", done->dead_node,
 	     dlm->reco.dead_node, done->node_idx, dlm->node_num);
 	BUG_ON(done->dead_node != dlm->reco.dead_node);
 
 	spin_lock(&dlm_reco_state_lock);
 	list_for_each(iter, &dlm->reco.node_data) {
-		ndata = list_entry (iter, dlm_reco_node_data, list);
+		ndata = list_entry (iter, struct dlm_reco_node_data, list);
 		if (ndata->node_num != done->node_idx)
 			continue;
 
@@ -789,15 +788,16 @@
 	return ret;
 }
 
-static void dlm_move_reco_locks_to_list(dlm_ctxt *dlm, struct list_head *list,
+static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
+					struct list_head *list,
 				       	u8 dead_node)
 {
-	dlm_lock_resource *res;
+	struct dlm_lock_resource *res;
 	struct list_head *iter, *iter2;
-	
+
 	spin_lock(&dlm->spinlock);
 	list_for_each_safe(iter, iter2, &dlm->reco.resources) {
-		res = list_entry (iter, dlm_lock_resource, recovering);
+		res = list_entry (iter, struct dlm_lock_resource, recovering);
 		if (dlm_is_recovery_lock(res->lockname.name,
 					 res->lockname.len))
 			continue;
@@ -817,12 +817,12 @@
 	spin_unlock(&dlm->spinlock);
 }
 
-static inline int dlm_num_locks_in_lockres(dlm_lock_resource *res)
+static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
 {
 	int total_locks = 0;
 	struct list_head *iter, *queue = &res->granted;
 	int i;
-	
+
 	for (i=0; i<3; i++) {
 		list_for_each(iter, queue)
 			total_locks++;
@@ -832,26 +832,26 @@
 }
 
 
-static int dlm_send_mig_lockres_msg(dlm_ctxt *dlm,
-				      dlm_migratable_lockres *mres, 
+static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
+				      struct dlm_migratable_lockres *mres,
 				      u8 send_to,
-				      dlm_lock_resource *res,
+				      struct dlm_lock_resource *res,
 				      int total_locks)
 {
 	u64 mig_cookie = mres->mig_cookie;
 	int mres_total_locks = mres->total_locks;
 	int sz, ret = 0, status = 0;
-	u8 orig_flags = mres->flags, 
+	u8 orig_flags = mres->flags,
 	   orig_master = mres->master;
 
 	BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
 	if (!mres->num_locks)
 		return 0;
 
-	sz = sizeof(dlm_migratable_lockres) + 
-		(mres->num_locks * sizeof(dlm_migratable_lock));
+	sz = sizeof(struct dlm_migratable_lockres) +
+		(mres->num_locks * sizeof(struct dlm_migratable_lock));
 
-	/* add an all-done flag if we reached the last lock */ 
+	/* add an all-done flag if we reached the last lock */
 	orig_flags = mres->flags;
 	BUG_ON(total_locks > mres_total_locks);
 	if (total_locks == mres_total_locks)
@@ -861,7 +861,7 @@
 	dlm_migratable_lockres_to_net(mres);
 
 	/* send it */
-	ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres, 
+	ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
 				 sz, send_to, &status);
 	if (ret < 0) {
 		mlog_errno(ret);
@@ -880,15 +880,15 @@
 	}
 
 	/* zero and reinit the message buffer */
-	dlm_init_migratable_lockres(mres, res->lockname.name, 
-				    res->lockname.len, mres_total_locks, 
+	dlm_init_migratable_lockres(mres, res->lockname.name,
+				    res->lockname.len, mres_total_locks,
 				    mig_cookie, orig_flags, orig_master);
 	return ret;
 }
 
-static void dlm_init_migratable_lockres(dlm_migratable_lockres *mres,
-					const char *lockname, int namelen, 
-					int total_locks, u64 cookie, 
+static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
+					const char *lockname, int namelen,
+					int total_locks, u64 cookie,
 					u8 flags, u8 master)
 {
 	/* mres here is one full page */
@@ -902,13 +902,13 @@
 	mres->master = master;
 }
 
-		
+	
 /* returns 1 if this lock fills the network structure,
  * 0 otherwise */
-static int dlm_add_lock_to_array(dlm_lock *lock, 
-				 dlm_migratable_lockres *mres, int queue)
+static int dlm_add_lock_to_array(struct dlm_lock *lock,
+				 struct dlm_migratable_lockres *mres, int queue)
 {
-	dlm_migratable_lock *ml;
+	struct dlm_migratable_lock *ml;
 	int lock_num = mres->num_locks;
 
 	ml = &(mres->ml[lock_num]);
@@ -940,14 +940,14 @@
 }
 
 
-int dlm_send_one_lockres(dlm_ctxt *dlm, dlm_lock_resource *res, 
-			 dlm_migratable_lockres *mres, 
+int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+			 struct dlm_migratable_lockres *mres,
 			 u8 send_to, u8 flags)
 {
 	struct list_head *queue, *iter;
 	int total_locks, i;
 	u64 mig_cookie = 0;
-	dlm_lock *lock;
+	struct dlm_lock *lock;
 	int ret = 0;
 
 	BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
@@ -963,24 +963,24 @@
 		mig_cookie = dlm_get_next_mig_cookie();
 	}
 
-	dlm_init_migratable_lockres(mres, res->lockname.name, 
-				    res->lockname.len, total_locks, 
+	dlm_init_migratable_lockres(mres, res->lockname.name,
+				    res->lockname.len, total_locks,
 				    mig_cookie, flags, res->owner);
 
 	total_locks = 0;
 	for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
 		queue = dlm_list_idx_to_ptr(res, i);
 		list_for_each(iter, queue) {
-			lock = list_entry (iter, dlm_lock, list);
+			lock = list_entry (iter, struct dlm_lock, list);
 
 			/* add another lock. */
 			total_locks++;
 			if (!dlm_add_lock_to_array(lock, mres, i))
 				continue;
-			 
-			/* this filled the lock message, 
+			
+			/* this filled the lock message,
 			 * we must send it immediately. */
-			ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, 
+			ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
 						       res, total_locks);
 			if (ret < 0) {
 				// TODO
@@ -1004,7 +1004,7 @@
 
 
 /*
- * this message will contain no more than one page worth of 
+ * this message will contain no more than one page worth of
  * recovery data, and it will work on only one lockres.
  * there may be many locks in this page, and we may need to wait
  * for additional packets to complete all the locks (rare, but
@@ -1018,29 +1018,30 @@
 
 int dlm_mig_lockres_handler(o2net_msg *msg, u32 len, void *data)
 {
-	dlm_ctxt *dlm = data;
-	dlm_migratable_lockres *mres = (dlm_migratable_lockres *)msg->buf;
+	struct dlm_ctxt *dlm = data;
+	struct dlm_migratable_lockres *mres =
+		(struct dlm_migratable_lockres *)msg->buf;
 	int ret = 0;
 	u8 real_master;
 	char *buf = NULL;
-	dlm_work_item *item = NULL;
-	dlm_lock_resource *res = NULL;
+	struct dlm_work_item *item = NULL;
+	struct dlm_lock_resource *res = NULL;
 
 	if (!dlm_grab(dlm))
 		return -EINVAL;
-	
+
 	dlm_migratable_lockres_to_host(mres);
-	
+
 	BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
 
 	real_master = mres->master;
 	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
-		/* cannot migrate a lockres with no master */	
+		/* cannot migrate a lockres with no master */
 		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
 	}
 
-	mlog(0, "%s message received from node %u\n", 
-		  (mres->flags & DLM_MRES_RECOVERY) ? 
+	mlog(0, "%s message received from node %u\n",
+		  (mres->flags & DLM_MRES_RECOVERY) ?
 		  "recovery" : "migration", mres->master);
 	if (mres->flags & DLM_MRES_ALL_DONE)
 		mlog(0, "all done flag.  all lockres data received!\n");
@@ -1052,7 +1053,7 @@
 		goto leave;
 
 	/* lookup the lock to see if we have a secondary queue for this
-	 * already...  just add the locks in and this will have its owner 
+	 * already...  just add the locks in and this will have its owner
 	 * and RECOVERY flag changed when it completes. */
 	res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len);
 	if (res) {
@@ -1063,7 +1064,7 @@
 			res->state |= DLM_LOCK_RES_RECOVERING;
 		} else {
 			if (res->state & DLM_LOCK_RES_MIGRATING) {
-				/* this is at least the second 
+				/* this is at least the second
 				 * lockres message */
 				mlog(0, "lock %.*s is already migrating\n",
 					  mres->lockname_len,
@@ -1081,13 +1082,13 @@
 		}
 		spin_unlock(&res->spinlock);
 	} else {
-		/* need to allocate, just like if it was 
+		/* need to allocate, just like if it was
 		 * mastered here normally  */
 		res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
 		if (!res)
 			goto leave;
 
-		/* to match the ref that we would have gotten if 
+		/* to match the ref that we would have gotten if
 		 * dlm_lookup_lockres had succeeded */
 		dlm_lockres_get(res);
 
@@ -1096,7 +1097,7 @@
 			res->state |= DLM_LOCK_RES_RECOVERING;
 		else
 			res->state |= DLM_LOCK_RES_MIGRATING;
-	
+
 		spin_lock(&dlm->spinlock);
 		__dlm_insert_lockres(dlm, res);
 		spin_unlock(&dlm->spinlock);
@@ -1128,7 +1129,7 @@
 	list_add_tail(&item->list, &dlm->work_list);
 	spin_unlock(&dlm->work_lock);
 	schedule_work(&dlm->dispatched_work);
-	
+
 leave:
 	dlm_put(dlm);
 	if (ret < 0) {
@@ -1143,22 +1144,22 @@
 }
 
 
-static void dlm_mig_lockres_worker(dlm_work_item *item, void *data)
+static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
 {
-	dlm_ctxt *dlm = data;
-	dlm_migratable_lockres *mres;
+	struct dlm_ctxt *dlm = data;
+	struct dlm_migratable_lockres *mres;
 	int ret = 0;
-	dlm_lock_resource *res;
+	struct dlm_lock_resource *res;
 	u8 real_master;
 
 	dlm = item->dlm;
-	mres = (dlm_migratable_lockres *)data;
-	
+	mres = (struct dlm_migratable_lockres *)data;
+
 	res = item->u.ml.lockres;
 	real_master = item->u.ml.real_master;
 
 	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
-		/* this case is super-rare. only occurs if 
+		/* this case is super-rare. only occurs if
 		 * node death happens during migration. */
 again:
 		ret = dlm_lockres_master_requery(dlm, res, &real_master);
@@ -1174,17 +1175,17 @@
 		} else {
 			mlog(0, "master needs to respond to sender "
 				  "that node %u still owns %.*s\n",
-				  real_master, res->lockname.len, 
+				  real_master, res->lockname.len,
 				  res->lockname.name);
 			/* cannot touch this lockres */
 			goto leave;
-		}	
+		}
 	}
-	
+
 	ret = dlm_process_recovery_data(dlm, res, mres);
 	if (ret < 0)
 		mlog(0, "dlm_process_recovery_data returned  %d\n", ret);
-	else 
+	else
 		mlog(0, "dlm_process_recovery_data succeeded\n");
 
 	if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
@@ -1193,7 +1194,7 @@
 		if (ret < 0)
 			mlog_errno(ret);
 	}
-	
+
 leave:
 	kfree(data);
 	mlog_exit(ret);
@@ -1201,13 +1202,14 @@
 
 
 
-static int dlm_lockres_master_requery(dlm_ctxt *dlm, dlm_lock_resource *res, 
+static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
+				      struct dlm_lock_resource *res,
 				      u8 *real_master)
 {
-	dlm_node_iter iter;
+	struct dlm_node_iter iter;
 	int nodenum;
 	int ret = 0;
-	
+
 	*real_master = DLM_LOCK_RES_OWNER_UNKNOWN;
 
 	/* we only reach here if one of the two nodes in a
@@ -1216,21 +1218,21 @@
 	 * know that the new_master got as far as creating
 	 * an mle on at least one node, but we do not know
 	 * if any nodes had actually cleared the mle and set
-	 * the master to the new_master.  the old master 
-	 * is supposed to set the owner to UNKNOWN in the 
+	 * the master to the new_master.  the old master
+	 * is supposed to set the owner to UNKNOWN in the
 	 * event of a new_master death, so the only possible
-	 * responses that we can get from nodes here are 
+	 * responses that we can get from nodes here are
 	 * that the master is new_master, or that the master
-	 * is UNKNOWN.  
+	 * is UNKNOWN. 
 	 * if all nodes come back with UNKNOWN then we know
 	 * the lock needs remastering here.
 	 * if any node comes back with a valid master, check
-	 * to see if that master is the one that we are 
+	 * to see if that master is the one that we are
 	 * recovering.  if so, then the new_master died and
 	 * we need to remaster this lock.  if not, then the
 	 * new_master survived and that node will respond to
-	 * other nodes about the owner.  
-	 * if there is an owner, this node needs to dump this 
+	 * other nodes about the owner. 
+	 * if there is an owner, this node needs to dump this
 	 * lockres and alert the sender that this lockres
 	 * was rejected. */
 	spin_lock(&dlm->spinlock);
@@ -1251,15 +1253,16 @@
 	}
 	return ret;
 }
-		
+	
 
-static int dlm_do_master_requery(dlm_ctxt *dlm, dlm_lock_resource *res, 
+static int dlm_do_master_requery(struct dlm_ctxt *dlm,
+				 struct dlm_lock_resource *res,
 				 u8 nodenum, u8 *real_master)
 {
 	int ret = -EINVAL;
-	dlm_master_requery req;
+	struct dlm_master_requery req;
 	int status = DLM_LOCK_RES_OWNER_UNKNOWN;
-	
+
 	memset(&req, 0, sizeof(req));
 	req.node_idx = dlm->node_num;
 	req.namelen = res->lockname.len;
@@ -1287,14 +1290,14 @@
  * be trusted */
 int dlm_master_requery_handler(o2net_msg *msg, u32 len, void *data)
 {
-	dlm_ctxt *dlm = data;
-	dlm_master_requery *req = (dlm_master_requery *)msg->buf;
-	dlm_lock_resource *res = NULL;
+	struct dlm_ctxt *dlm = data;
+	struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
+	struct dlm_lock_resource *res = NULL;
 	int master = DLM_LOCK_RES_OWNER_UNKNOWN;
 	u32 flags = DLM_ASSERT_MASTER_REQUERY;
-	
+
 	if (!dlm_grab(dlm)) {
-		/* since the domain has gone away on this 
+		/* since the domain has gone away on this
 		 * node, the proper response is UNKNOWN */
 		return master;
 	}
@@ -1307,7 +1310,7 @@
 		spin_lock(&res->spinlock);
 		master = res->owner;
 		if (master == dlm->node_num) {
-			int ret = dlm_dispatch_assert_master(dlm, res, 
+			int ret = dlm_dispatch_assert_master(dlm, res,
 							     0, 0, flags);
 			if (ret < 0) {
 				mlog_errno(-ENOMEM);
@@ -1323,8 +1326,8 @@
 	return master;
 }
 
-static inline struct list_head * dlm_list_num_to_pointer(dlm_lock_resource *res,
-							 int list_num)
+static inline struct list_head *
+dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
 {
 	struct list_head *ret;
 	BUG_ON(list_num < 0);
@@ -1333,22 +1336,22 @@
 	ret += list_num;
 	return ret;
 }
-/* TODO: do ast flush business 
+/* TODO: do ast flush business
  * TODO: do MIGRATING and RECOVERING spinning
  */
 
 /*
 * NOTE about in-flight requests during migration:
 *
-* Before attempting the migrate, the master has marked the lockres as 
+* Before attempting the migrate, the master has marked the lockres as
 * MIGRATING and then flushed all of its pending ASTS.  So any in-flight
-* requests either got queued before the MIGRATING flag got set, in which 
+* requests either got queued before the MIGRATING flag got set, in which
 * case the lock data will reflect the change and a return message is on
 * the way, or the request failed to get in before MIGRATING got set.  In
 * this case, the caller will be told to spin and wait for the MIGRATING
 * flag to be dropped, then recheck the master.
 * This holds true for the convert, cancel and unlock cases, and since lvb
-* updates are tied to these same messages, it applies to lvb updates as 
+* updates are tied to these same messages, it applies to lvb updates as
 * well.  For the lock case, there is no way a lock can be on the master
 * queue and not be on the secondary queue since the lock is always added
 * locally first.  This means that the new target node will never be sent
@@ -1360,17 +1363,18 @@
 * We will *not* attempt to modify the lock underneath the waiter.
 */
 
-static int dlm_process_recovery_data(dlm_ctxt *dlm, dlm_lock_resource *res,
-				     dlm_migratable_lockres *mres)
+static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
+				     struct dlm_lock_resource *res,
+				     struct dlm_migratable_lockres *mres)
 {
-	dlm_migratable_lock *ml;
+	struct dlm_migratable_lock *ml;
 	struct list_head *queue;
-	dlm_lock *newlock = NULL;
-	dlm_lockstatus *lksb = NULL;
+	struct dlm_lock *newlock = NULL;
+	struct dlm_lockstatus *lksb = NULL;
 	int ret = 0;
 	int i;
 	struct list_head *iter;
-	dlm_lock *lock = NULL;
+	struct dlm_lock *lock = NULL;
 
 	mlog(0, "running %d locks for this lockres\n", mres->num_locks);
 	for (i=0; i<mres->num_locks; i++) {
@@ -1378,8 +1382,8 @@
 		BUG_ON(ml->highest_blocked != LKM_IVMODE);
 		newlock = NULL;
 		lksb = NULL;
-		
-		queue = dlm_list_num_to_pointer(res, ml->list);	
+	
+		queue = dlm_list_num_to_pointer(res, ml->list);
 
 		/* if the lock is for the local node it needs to
 		 * be moved to the proper location within the queue.
@@ -1387,10 +1391,10 @@
 		if (ml->node == dlm->node_num) {
 			/* MIGRATION ONLY! */
 			BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));
-			
+		
 			spin_lock(&res->spinlock);
 			list_for_each(iter, queue) {
-				lock = list_entry (iter, dlm_lock, list);
+				lock = list_entry (iter, struct dlm_lock, list);
 				if (lock->ml.cookie != ml->cookie)
 					lock = NULL;
 				else
@@ -1406,16 +1410,16 @@
 				BUG();
 			}
 			BUG_ON(lock->ml.node != ml->node);
+		
+			/* see NOTE above about why we do not update
+			 * to match the master here */
 			
-			/* see NOTE above about why we do not update 
-			 * to match the master here */
-				
 			/* move the lock to its proper place */
 			/* do not alter lock refcount.  switching lists. */
 			list_del_init(&lock->list);
 			list_add_tail(&lock->list, queue);
 			spin_unlock(&res->spinlock);
-			
+		
 			mlog(0, "just reordered a local lock!\n");
 			continue;
 		}
@@ -1428,15 +1432,15 @@
 		}
 		lksb = newlock->lksb;
 		dlm_lock_attach_lockres(newlock, res);
-		
+	
 		if (ml->convert_type != LKM_IVMODE) {
 			BUG_ON(queue != &res->converting);
 			newlock->ml.convert_type = ml->convert_type;
 		}
-		lksb->flags |= (ml->flags & 
+		lksb->flags |= (ml->flags &
 				(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
 		if (lksb->flags & DLM_LKSB_PUT_LVB) {
-			/* other node was trying to update 
+			/* other node was trying to update
 			 * lvb when node died.  recreate the
 			 * lksb with the updated lvb. */
 			memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
@@ -1448,13 +1452,13 @@
 		 *       meaningless.
 		 *    2. order of locks on converting queue is
 		 *       LOST with the node death.  sorry charlie.
-		 *    3. order of locks on the blocked queue is 
+		 *    3. order of locks on the blocked queue is
 		 *       also LOST.
 		 * order of locks does not affect integrity, it
-		 * just means that a lock request may get pushed 
+		 * just means that a lock request may get pushed
 		 * back in line as a result of the node death.
 		 * also note that for a given node the lock order
-		 * for its secondary queue locks is preserved 
+		 * for its secondary queue locks is preserved
 		 * relative to each other, but clearly *not*
 		 * preserved relative to locks from other nodes.
 		 */
@@ -1476,12 +1480,13 @@
 	return ret;
 }
 
-void dlm_move_lockres_to_recovery_list(dlm_ctxt *dlm, dlm_lock_resource *res)
+void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
+				       struct dlm_lock_resource *res)
 {
 	int i;
 	struct list_head *queue, *iter, *iter2;
-	dlm_lock *lock;
-	
+	struct dlm_lock *lock;
+
 	res->state |= DLM_LOCK_RES_RECOVERING;
 	if (!list_empty(&res->recovering))
 		list_del_init(&res->recovering);
@@ -1491,7 +1496,7 @@
 	for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
 		queue = dlm_list_idx_to_ptr(res, i);
 		list_for_each_safe(iter, iter2, queue) {
-			lock = list_entry (iter, dlm_lock, list);
+			lock = list_entry (iter, struct dlm_lock, list);
 			dlm_lock_get(lock);
 			if (lock->convert_pending) {
 				/* move converting lock back to granted */
@@ -1515,13 +1520,13 @@
 				dlm_revert_pending_lock(res, lock);
 				lock->lock_pending = 0;
 			} else if (lock->unlock_pending) {
-				/* if an unlock was in progress, treat as 
+				/* if an unlock was in progress, treat as
 				 * if this had completed successfully
 				 * before sending this lock state to the
 				 * new master.  note that the dlm_unlock
-				 * call is still responsible for calling 
+				 * call is still responsible for calling
 				 * the unlockast.  that will happen after
-				 * the network call times out.  for now, 
+				 * the network call times out.  for now,
 				 * just move lists to prepare the new
 				 * recovery master.  */
 				BUG_ON(i != DLM_GRANTED_LIST);
@@ -1531,7 +1536,7 @@
 				dlm_commit_pending_unlock(res, lock);
 				lock->unlock_pending = 0;
 			} else if (lock->cancel_pending) {
-				/* if a cancel was in progress, treat as 
+				/* if a cancel was in progress, treat as
 				 * if this had completed successfully
 				 * before sending this lock state to the
 				 * new master */
@@ -1552,19 +1557,19 @@
 /* removes all recovered locks from the recovery list.
  * sets the res->owner to the new master.
  * unsets the RECOVERY flag and wakes waiters. */
-static void dlm_finish_local_lockres_recovery(dlm_ctxt *dlm, u8 dead_node,
-					      u8 new_master)
+static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
+					      u8 dead_node, u8 new_master)
 {
 	int i;
 	struct list_head *iter, *iter2, *bucket;
-	dlm_lock_resource *res;
+	struct dlm_lock_resource *res;
 
 	mlog_entry_void();
-		   
+		  
 	assert_spin_locked(&dlm->spinlock);
 
 	list_for_each_safe(iter, iter2, &dlm->reco.resources) {
-		res = list_entry (iter, dlm_lock_resource, recovering);
+		res = list_entry (iter, struct dlm_lock_resource, recovering);
 		if (res->owner == dead_node) {
 			list_del_init(&res->recovering);
 			spin_lock(&res->spinlock);
@@ -1575,20 +1580,20 @@
 			wake_up(&res->wq);
 		}
 	}
-	
+
 	/* this will become unnecessary eventually, but
 	 * for now we need to run the whole hash, clear
-	 * the RECOVERING state and set the owner 
+	 * the RECOVERING state and set the owner
 	 * if necessary */
 	for (i=0; i<DLM_HASH_SIZE; i++) {
 		bucket = &(dlm->resources[i]);
 		list_for_each(iter, bucket) {
-			res = list_entry (iter, dlm_lock_resource, list);
+			res = list_entry (iter, struct dlm_lock_resource, list);
 			if (res->state & DLM_LOCK_RES_RECOVERING) {
 				if (res->owner == dead_node) {
 					mlog(0, "(this=%u) res %.*s owner=%u "
 					     "was not on recovering list, but "
-					     "clearing state anyway\n", 
+					     "clearing state anyway\n",
 					     dlm->node_num, res->lockname.len,
 					     res->lockname.name, new_master);
 				} else if (res->owner == dlm->node_num) {
@@ -1597,7 +1602,7 @@
 					     "owner is THIS node, clearing\n",
 					     dlm->node_num, res->lockname.len,
 					     res->lockname.name, new_master);
-				} else 
+				} else
 					continue;
 
 				spin_lock(&res->spinlock);
@@ -1611,32 +1616,32 @@
 	}
 }
 
-static void dlm_free_dead_locks(dlm_ctxt *dlm, dlm_lock_resource *res,
-				u8 dead_node)
+static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
+				struct dlm_lock_resource *res, u8 dead_node)
 {
 	struct list_head *iter, *tmpiter;
-	dlm_lock *lock;
+	struct dlm_lock *lock;
 
 	assert_spin_locked(&dlm->spinlock);
 	assert_spin_locked(&res->spinlock);
 
 	/* TODO: check pending_asts, pending_basts here */
 	list_for_each_safe(iter, tmpiter, &res->granted) {
-		lock = list_entry (iter, dlm_lock, list);
+		lock = list_entry (iter, struct dlm_lock, list);
 		if (lock->ml.node == dead_node) {
 			list_del_init(&lock->list);
 			dlm_lock_put(lock);
 		}
 	}
 	list_for_each_safe(iter, tmpiter, &res->converting) {
-		lock = list_entry (iter, dlm_lock, list);
+		lock = list_entry (iter, struct dlm_lock, list);
 		if (lock->ml.node == dead_node) {
 			list_del_init(&lock->list);
 			dlm_lock_put(lock);
 		}
 	}
 	list_for_each_safe(iter, tmpiter, &res->blocked) {
-		lock = list_entry (iter, dlm_lock, list);
+		lock = list_entry (iter, struct dlm_lock, list);
 		if (lock->ml.node == dead_node) {
 			list_del_init(&lock->list);
 			dlm_lock_put(lock);
@@ -1647,21 +1652,21 @@
 	__dlm_dirty_lockres(dlm, res);
 }
 
-static void dlm_do_local_recovery_cleanup(dlm_ctxt *dlm, u8 dead_node)
+static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
 {
 	struct list_head *iter;
-	dlm_lock_resource *res;
+	struct dlm_lock_resource *res;
 	int i;
 	struct list_head *bucket;
 
 
-	/* purge any stale mles */	
+	/* purge any stale mles */
 	dlm_clean_master_list(dlm, dead_node);
 
-	/* 
+	/*
 	 * now clean up all lock resources.  there are two rules:
 	 *
-	 * 1) if the dead node was the master, move the lockres 
+	 * 1) if the dead node was the master, move the lockres
 	 *    to the recovering list.  set the RECOVERING flag.
 	 *    this lockres needs to be cleaned up before it can
 	 *    be used further.
@@ -1669,13 +1674,13 @@
 	 * 2) if this node was the master, remove all locks from
 	 *    each of the lockres queues that were owned by the
 	 *    dead node.  once recovery finishes, the dlm thread
-	 *    can be kicked again to see if any ASTs or BASTs 
+	 *    can be kicked again to see if any ASTs or BASTs
 	 *    need to be fired as a result.
 	 */
 	for (i=0; i<DLM_HASH_SIZE; i++) {
 		bucket = &(dlm->resources[i]);
 		list_for_each(iter, bucket) {
-			res = list_entry (iter, dlm_lock_resource, list);
+			res = list_entry (iter, struct dlm_lock_resource, list);
 			if (dlm_is_recovery_lock(res->lockname.name,
 						 res->lockname.len))
 				continue;
@@ -1690,9 +1695,9 @@
 
 }
 
-void __dlm_hb_node_down(dlm_ctxt *dlm, int idx)
+void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
 {
-	dlm_master_list_entry *mle;
+	struct dlm_master_list_entry *mle;
 	struct list_head *iter;
 
 	assert_spin_locked(&dlm->spinlock);
@@ -1727,7 +1732,7 @@
 
 	/* notify any mles attached to the heartbeat events */
 	list_for_each(iter, &dlm->mle_hb_events) {
-		mle = list_entry(iter, dlm_master_list_entry, hb_events);
+		mle = list_entry(iter, struct dlm_master_list_entry, hb_events);
 		dlm_mle_node_down(dlm, mle, NULL, idx);
 	}
 
@@ -1744,7 +1749,7 @@
 
 void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
 {
-	dlm_ctxt *dlm = data;
+	struct dlm_ctxt *dlm = data;
 
 	if (!dlm_grab(dlm))
 		return;
@@ -1758,8 +1763,8 @@
 
 void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
 {
-	dlm_ctxt *dlm = data;
-	dlm_master_list_entry *mle;
+	struct dlm_ctxt *dlm = data;
+	struct dlm_master_list_entry *mle;
 	struct list_head *iter;
 
 	if (!dlm_grab(dlm))
@@ -1771,7 +1776,7 @@
 
 	/* notify any mles attached to the heartbeat events */
 	list_for_each(iter, &dlm->mle_hb_events) {
-		mle = list_entry(iter, dlm_master_list_entry, hb_events);
+		mle = list_entry(iter, struct dlm_master_list_entry, hb_events);
 		dlm_mle_node_up(dlm, mle, node, idx);
 	}
 
@@ -1780,14 +1785,14 @@
 	dlm_put(dlm);
 }
 
-int __dlm_hb_node_dead(dlm_ctxt *dlm, int node)
+int __dlm_hb_node_dead(struct dlm_ctxt *dlm, int node)
 {
 	if (test_bit(node, dlm->recovery_map))
 		return 1;
 	return 0;
 }
 
-int dlm_hb_node_dead(dlm_ctxt *dlm, int node)
+int dlm_hb_node_dead(struct dlm_ctxt *dlm, int node)
 {
 	int ret;
 	spin_lock(&dlm->spinlock);
@@ -1798,26 +1803,26 @@
 
 static void dlm_reco_ast(void *astdata)
 {
-	dlm_ctxt *dlm = astdata;
+	struct dlm_ctxt *dlm = astdata;
 	mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n",
 	     dlm->node_num, dlm->name);
 }
 static void dlm_reco_bast(void *astdata, int blocked_type)
 {
-	dlm_ctxt *dlm = astdata;
+	struct dlm_ctxt *dlm = astdata;
 	mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n",
 	     dlm->node_num, dlm->name);
 }
-static void dlm_reco_unlock_ast(void *astdata, dlm_status st)
+static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
 {
 	mlog(0, "unlockast for recovery lock fired!\n");
 }
 
 
-int dlm_pick_recovery_master(dlm_ctxt *dlm)
+int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
 {
-	dlm_status ret;
-	dlm_lockstatus lksb;
+	enum dlm_status ret;
+	struct dlm_lockstatus lksb;
 	int status = -EINVAL;
 
 	mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
@@ -1825,26 +1830,26 @@
 retry:
 	memset(&lksb, 0, sizeof(lksb));
 
-	ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY, 
+	ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
 		      DLM_RECOVERY_LOCK_NAME, dlm_reco_ast, dlm, dlm_reco_bast);
 
 	if (ret == DLM_NORMAL) {
 		mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
 		     dlm->name, dlm->node_num);
-		/* I am master, send message to all nodes saying 
+		/* I am master, send message to all nodes saying
 		 * that I am beginning a recovery session */
-		status = dlm_send_begin_reco_message(dlm, 
+		status = dlm_send_begin_reco_message(dlm,
 					      dlm->reco.dead_node);
 
 		/* recovery lock is a special case.  ast will not get fired,
 		 * so just go ahead and unlock it. */
 		ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
 		if (ret != DLM_NORMAL) {
-			/* this would really suck. this could only happen 
+			/* this would really suck. this could only happen
 			 * if there was a network error during the unlock
 			 * because of node death.  this means the unlock
 			 * is actually "done" and the lock structure is
-			 * even freed.  we can continue, but only 
+			 * even freed.  we can continue, but only
 			 * because this specific lock name is special. */
 			mlog(0, "dlmunlock returned %d\n", ret);
 		}
@@ -1856,20 +1861,20 @@
 		}
 	} else if (ret == DLM_NOTQUEUED) {
 		mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
-		     dlm->name, dlm->node_num);		
-		/* another node is master. wait on 
+		     dlm->name, dlm->node_num);	
+		/* another node is master. wait on
 		 * reco.new_master != O2NM_INVALID_NODE_NUM */
 		status = -EEXIST;
-	} 
+	}
 
 	return status;
 }
 
-static int dlm_send_begin_reco_message(dlm_ctxt *dlm, u8 dead_node)
+static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
 {
-	dlm_begin_reco br;
+	struct dlm_begin_reco br;
 	int ret = 0;
-	dlm_node_iter iter;
+	struct dlm_node_iter iter;
 	int nodenum;
 	int status;
 
@@ -1880,7 +1885,7 @@
 	spin_lock(&dlm->spinlock);
 	dlm_node_iter_init(dlm->domain_map, &iter);
 	spin_unlock(&dlm->spinlock);
-	
+
 	clear_bit(dead_node, iter.node_map);
 
 	memset(&br, 0, sizeof(br));
@@ -1903,12 +1908,12 @@
 		ret = -EINVAL;
 		mlog(0, "attempting to send begin reco msg to %d\n",
 			  nodenum);
-		ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key, 
+		ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
 					 &br, sizeof(br), nodenum, &status);
 		if (ret >= 0)
 			ret = status;
 		if (ret < 0) {
-			dlm_lock_resource *res;
+			struct dlm_lock_resource *res;
 			mlog_errno(ret);
 			mlog(ML_ERROR, "begin reco of dlm %s to node %u "
 			    " returned %d\n", dlm->name, nodenum, ret);
@@ -1929,10 +1934,10 @@
 
 int dlm_begin_reco_handler(o2net_msg *msg, u32 len, void *data)
 {
-	dlm_ctxt *dlm = data;
-	dlm_begin_reco *br = (dlm_begin_reco *)msg->buf;
+	struct dlm_ctxt *dlm = data;
+	struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;
 
-	/* ok to return 0, domain has gone away */	
+	/* ok to return 0, domain has gone away */
 	if (!dlm_grab(dlm))
 		return 0;
 
@@ -1967,11 +1972,11 @@
 	return 0;
 }
 
-static int dlm_send_finalize_reco_message(dlm_ctxt *dlm)
+static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
 {
 	int ret = 0;
-	dlm_finalize_reco fr;
-	dlm_node_iter iter;
+	struct dlm_finalize_reco fr;
+	struct dlm_node_iter iter;
 	int nodenum;
 	int status;
 
@@ -1990,7 +1995,7 @@
 	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
 		if (nodenum == dlm->node_num)
 			continue;
-		ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key, 
+		ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
 					 &fr, sizeof(fr), nodenum, &status);
 		if (ret >= 0)
 			ret = status;
@@ -2005,10 +2010,10 @@
 
 int dlm_finalize_reco_handler(o2net_msg *msg, u32 len, void *data)
 {
-	dlm_ctxt *dlm = data;
-	dlm_finalize_reco *fr = (dlm_finalize_reco *)msg->buf;
+	struct dlm_ctxt *dlm = data;
+	struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
 
-	/* ok to return 0, domain has gone away */	
+	/* ok to return 0, domain has gone away */
 	if (!dlm_grab(dlm))
 		return 0;
 
@@ -2016,7 +2021,7 @@
 
 	mlog(0, "node %u finalizing recovery of node %u\n",
 	     fr->node_idx, fr->dead_node);
-	
+
 	spin_lock(&dlm->spinlock);
 
 	if (dlm->reco.new_master != fr->node_idx) {
@@ -2031,7 +2036,7 @@
 		     fr->node_idx, fr->dead_node, dlm->reco.dead_node);
 		BUG();
 	}
-	
+
 	dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
 
 	spin_unlock(&dlm->spinlock);
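
The conversion running through every dlmrecovery.c hunk above is mechanical:
each dlm_foo typedef becomes a plain struct dlm_foo (and dlm_status becomes
enum dlm_status), so declarations, casts, and list_entry() calls all gain the
keyword.  A minimal before/after sketch, using a hypothetical dlm_example
type rather than anything in this tree (iter is assumed to be a
struct list_head * cursor):

	/* before: the typedef hides the struct tag at every use site */
	typedef struct _dlm_example {
		struct list_head list;
		u8 node_num;
	} dlm_example;

	dlm_example *ex = list_entry(iter, dlm_example, list);

	/* after: the tag is spelled out at each use, matching kernel style */
	struct dlm_example {
		struct list_head list;
		u8 node_num;
	};

	struct dlm_example *ex = list_entry(iter, struct dlm_example, list);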

Modified: trunk/fs/ocfs2/dlm/dlmthread.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmthread.c	2005-06-29 22:47:38 UTC (rev 2447)
+++ trunk/fs/ocfs2/dlm/dlmthread.c	2005-06-30 00:30:29 UTC (rev 2448)
@@ -61,7 +61,7 @@
 
 /* will exit holding res->spinlock, but may drop in function */
 /* waits until flags are cleared on res->state */
-void __dlm_wait_on_lockres_flags(dlm_lock_resource *res, int flags)
+void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags)
 {
 	DECLARE_WAITQUEUE(wait, current);
 
@@ -81,7 +81,7 @@
 }
 
 /* opposite of the above, waits until flags are SET */
-void __dlm_wait_on_lockres_flags_set(dlm_lock_resource *res, int flags)
+void __dlm_wait_on_lockres_flags_set(struct dlm_lock_resource *res, int flags)
 {
 	DECLARE_WAITQUEUE(wait, current);
 
@@ -100,7 +100,7 @@
 	current->state = TASK_RUNNING;
 }
 
-static int __dlm_lockres_unused(dlm_lock_resource *res)
+static int __dlm_lockres_unused(struct dlm_lock_resource *res)
 {
 	if (list_empty(&res->granted) &&
 	    list_empty(&res->converting) &&
@@ -114,7 +114,8 @@
 /* Call whenever you may have added or deleted something from one of
  * the lockres queue's. This will figure out whether it belongs on the
  * unused list or not and does the appropriate thing. */
-static void __dlm_lockres_calc_usage(dlm_ctxt *dlm, dlm_lock_resource *res)
+static void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
+				     struct dlm_lock_resource *res)
 {
 	mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
 
@@ -137,8 +138,8 @@
 	}
 }
 
-void dlm_lockres_calc_usage(dlm_ctxt *dlm,
-			    dlm_lock_resource *res)
+void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
+			    struct dlm_lock_resource *res)
 {
 	mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
 	spin_lock(&dlm->spinlock);
@@ -152,7 +153,7 @@
 
 /* TODO: Eventual API: Called with the dlm spinlock held, may drop it
  * to do migration, but will re-acquire before exit. */
-void dlm_purge_lockres(dlm_ctxt *dlm, dlm_lock_resource *lockres)
+void dlm_purge_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *lockres)
 {
 	int master;
 	int ret;
@@ -195,12 +196,12 @@
 	__dlm_unhash_lockres(lockres);
 }
 
-static void dlm_run_purge_list(dlm_ctxt *dlm,
+static void dlm_run_purge_list(struct dlm_ctxt *dlm,
 			       int purge_now)
 {
 	unsigned int run_max, unused;
 	unsigned long purge_jiffies;
-	dlm_lock_resource *lockres;
+	struct dlm_lock_resource *lockres;
 
 	spin_lock(&dlm->spinlock);
 	run_max = dlm->purge_count;
@@ -209,7 +210,7 @@
 		run_max--;
 
 		lockres = list_entry(dlm->purge_list.next,
-				     dlm_lock_resource, purge);
+				     struct dlm_lock_resource, purge);
 
 		/* Status of the lockres *might* change so double
 		 * check. If the lockres is unused, holding the dlm
@@ -223,7 +224,7 @@
 		if (!unused)
 			continue;
 
-		purge_jiffies = lockres->last_used + 
+		purge_jiffies = lockres->last_used +
 			msecs_to_jiffies(DLM_PURGE_INTERVAL_MS);
 
 		/* Make sure that we want to be processing this guy at
@@ -252,16 +253,16 @@
 	spin_unlock(&dlm->spinlock);
 }
 
-void dlm_shuffle_lists(dlm_ctxt *dlm, dlm_lock_resource *res)
+void dlm_shuffle_lists(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 {
-	dlm_lock *lock, *target;
+	struct dlm_lock *lock, *target;
 	struct list_head *iter;
 	struct list_head *head;
 	int can_grant = 1;
 
 	//mlog(0, "res->lockname.len=%d\n", res->lockname.len);
 	//mlog(0, "res->lockname.name=%p\n", res->lockname.name);
-	//mlog(0, "shuffle res %.*s\n", res->lockname.len, 
+	//mlog(0, "shuffle res %.*s\n", res->lockname.len,
 	//	  res->lockname.name);
 
 	/* because this function is called with the lockres
@@ -279,7 +280,7 @@
 	mlog(0, "res %.*s has locks on a convert queue\n", res->lockname.len,
 	     res->lockname.name);
 
-	target = list_entry(res->converting.next, dlm_lock, list);
+	target = list_entry(res->converting.next, struct dlm_lock, list);
 	if (target->ml.convert_type == LKM_IVMODE) {
 		mlog(ML_ERROR, "%.*s: converting a lock with no "
 		     "convert_type!\n", res->lockname.len, res->lockname.name);
@@ -287,10 +288,10 @@
 	}
 	head = &res->granted;
 	list_for_each(iter, head) {
-		lock = list_entry(iter, dlm_lock, list);
+		lock = list_entry(iter, struct dlm_lock, list);
 		if (lock==target)
 			continue;
-		if (!dlm_lock_compatible(lock->ml.type, 
+		if (!dlm_lock_compatible(lock->ml.type,
 					 target->ml.convert_type)) {
 			can_grant = 0;
 			/* queue the BAST if not already */
@@ -300,16 +301,16 @@
 			}
 			/* update the highest_blocked if needed */
 			if (lock->ml.highest_blocked < target->ml.convert_type)
-				lock->ml.highest_blocked = 
+				lock->ml.highest_blocked =
 					target->ml.convert_type;
 		}
 	}
 	head = &res->converting;
 	list_for_each(iter, head) {
-		lock = list_entry(iter, dlm_lock, list);
+		lock = list_entry(iter, struct dlm_lock, list);
 		if (lock==target)
 			continue;
-		if (!dlm_lock_compatible(lock->ml.type, 
+		if (!dlm_lock_compatible(lock->ml.type,
 					 target->ml.convert_type)) {
 			can_grant = 0;
 			if (lock->ml.highest_blocked == LKM_IVMODE) {
@@ -317,19 +318,19 @@
 				dlm_queue_bast(dlm, lock);
 			}
 			if (lock->ml.highest_blocked < target->ml.convert_type)
-				lock->ml.highest_blocked = 
+				lock->ml.highest_blocked =
 					target->ml.convert_type;
 		}
 	}
-	
+
 	/* we can convert the lock */
 	if (can_grant) {
 		spin_lock(&target->spinlock);
-		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);	
-		
+		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);
+	
 		mlog(0, "calling ast for converting lock: %.*s, have: %d, "
-		     "granting: %d, node: %u\n", res->lockname.len, 
-		     res->lockname.name, target->ml.type, 
+		     "granting: %d, node: %u\n", res->lockname.len,
+		     res->lockname.name, target->ml.type,
 		     target->ml.convert_type, target->ml.node);
 
 		target->ml.type = target->ml.convert_type;
@@ -351,11 +352,11 @@
 blocked:
 	if (list_empty(&res->blocked))
 		goto leave;
-	target = list_entry(res->blocked.next, dlm_lock, list);
+	target = list_entry(res->blocked.next, struct dlm_lock, list);
 
 	head = &res->granted;
 	list_for_each(iter, head) {
-		lock = list_entry(iter, dlm_lock, list);
+		lock = list_entry(iter, struct dlm_lock, list);
 		if (lock==target)
 			continue;
 		if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
@@ -371,7 +372,7 @@
 
 	head = &res->converting;
 	list_for_each(iter, head) {
-		lock = list_entry(iter, dlm_lock, list);
+		lock = list_entry(iter, struct dlm_lock, list);
 		if (lock==target)
 			continue;
 		if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
@@ -384,15 +385,15 @@
 				lock->ml.highest_blocked = target->ml.type;
 		}
 	}
-	
-	/* we can grant the blocked lock (only 
+
+	/* we can grant the blocked lock (only
 	 * possible if converting list empty) */
 	if (can_grant) {
 		spin_lock(&target->spinlock);
 		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);
-		
+	
 		mlog(0, "calling ast for blocked lock: %.*s, granting: %d, "
-		     "node: %u\n", res->lockname.len, res->lockname.name, 
+		     "node: %u\n", res->lockname.len, res->lockname.name,
 		     target->ml.type, target->ml.node);
 
 		// target->ml.type is already correct
@@ -401,7 +402,7 @@
 
 		BUG_ON(!target->lksb);
 		target->lksb->status = DLM_NORMAL;
-		
+	
 		spin_unlock(&target->spinlock);
 
 		__dlm_lockres_reserve_ast(res);
@@ -415,7 +416,7 @@
 }
 
 /* must have NO locks when calling this with res !=NULL * */
-void dlm_kick_thread(dlm_ctxt *dlm, dlm_lock_resource *res)
+void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 {
 	mlog_entry("dlm=%p, res=%p\n", dlm, res);
 	if (res) {
@@ -428,7 +429,7 @@
 	wake_up(&dlm->dlm_thread_wq);
 }
 
-void __dlm_kick_thread(dlm_ctxt *dlm, dlm_lock_resource *res)
+void __dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 {
 	mlog_entry("dlm=%p, res=%p\n", dlm, res);
 	if (res)
@@ -437,10 +438,10 @@
 	wake_up(&dlm->dlm_thread_wq);
 }
 
-void __dlm_dirty_lockres(dlm_ctxt *dlm, dlm_lock_resource *res)
+void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 {
 	mlog_entry("dlm=%p, res=%p\n", dlm, res);
-	
+
 	assert_spin_locked(&dlm->spinlock);
 	assert_spin_locked(&res->spinlock);
 
@@ -454,7 +455,7 @@
 
 
 /* Launch the NM thread for the mounted volume */
-int dlm_launch_thread(dlm_ctxt *dlm)
+int dlm_launch_thread(struct dlm_ctxt *dlm)
 {
 	mlog(0, "starting dlm thread...\n");
 
@@ -468,7 +469,7 @@
 	return 0;
 }
 
-void dlm_complete_thread(dlm_ctxt *dlm)
+void dlm_complete_thread(struct dlm_ctxt *dlm)
 {
 	if (dlm->dlm_thread_task) {
 		mlog(ML_KTHREAD, "waiting for dlm thread to exit\n");
@@ -477,35 +478,35 @@
 	}
 }
 
-static int dlm_dirty_list_empty(dlm_ctxt *dlm)
+static int dlm_dirty_list_empty(struct dlm_ctxt *dlm)
 {
 	int empty;
 
 	spin_lock(&dlm->spinlock);
 	empty = list_empty(&dlm->dirty_list);
 	spin_unlock(&dlm->spinlock);
-	
+
 	return empty;
 }
 
-int dlm_flush_lockres_asts(dlm_ctxt *dlm, dlm_lock_resource *res)
+int dlm_flush_lockres_asts(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 {
 	dlm_flush_asts(dlm);
 	/* still need to implement dlm_flush_lockres_asts */
 	return 0;
 }
 
-void dlm_flush_asts(dlm_ctxt *dlm)
+void dlm_flush_asts(struct dlm_ctxt *dlm)
 {
 	int ret;
-	dlm_lock *lock;
-	dlm_lock_resource *res;
+	struct dlm_lock *lock;
+	struct dlm_lock_resource *res;
 	u8 hi;
 
 	spin_lock(&dlm->ast_lock);
 	while (!list_empty(&dlm->pending_asts)) {
-		lock = list_entry(dlm->pending_asts.next, 
-				  dlm_lock, ast_list);
+		lock = list_entry(dlm->pending_asts.next,
+				  struct dlm_lock, ast_list);
 		/* get an extra ref on lock */
 		dlm_lock_get(lock);
 		res = lock->lockres;
@@ -541,10 +542,10 @@
 		dlm_lock_put(lock);
 		dlm_lockres_release_ast(res);
 	}
-	
+
 	while (!list_empty(&dlm->pending_basts)) {
-		lock = list_entry(dlm->pending_basts.next, 
-				  dlm_lock, bast_list);
+		lock = list_entry(dlm->pending_basts.next,
+				  struct dlm_lock, bast_list);
 		/* get an extra ref on lock */
 		dlm_lock_get(lock);
 		res = lock->lockres;
@@ -565,14 +566,14 @@
 
 		mlog(0, "delivering a bast for this lockres "
 		     "(blocked = %d\n", hi);
-		
+	
 		if (lock->ml.node != dlm->node_num) {
 			ret = dlm_send_proxy_bast(dlm, res, lock, hi);
 			if (ret < 0)
 				mlog_errno(ret);
 		} else
 			dlm_do_local_bast(dlm, res, lock, hi);
-		
+	
 		spin_lock(&dlm->ast_lock);
 
 		/* possible that another bast was queued while
@@ -583,7 +584,7 @@
 			     "keep the bast_pending flag set.\n");
 		} else
 			lock->bast_pending = 0;
-		
+	
 		/* drop the extra ref.
 		 * this may drop it completely. */
 		dlm_lock_put(lock);
@@ -599,51 +600,51 @@
 
 static int dlm_thread(void *data)
 {
-	dlm_lock_resource *res;
-	dlm_ctxt *dlm = data;
+	struct dlm_lock_resource *res;
+	struct dlm_ctxt *dlm = data;
 	unsigned long timeout = msecs_to_jiffies(DLM_THREAD_TIMEOUT_MS);
 
 	mlog(0, "dlm thread running for %s...\n", dlm->name);
 
 	while (!kthread_should_stop()) {
 		int n = DLM_THREAD_MAX_DIRTY;
-		
+	
 		/* dlm_shutting_down is very point-in-time, but that
 		 * doesn't matter as we'll just loop back around if we
 		 * get false on the leading edge of a state
 		 * transition. */
 		dlm_run_purge_list(dlm, dlm_shutting_down(dlm));
 
-		/* We really don't want to hold dlm->spinlock while 
+		/* We really don't want to hold dlm->spinlock while
 		 * calling dlm_shuffle_lists on each lockres that
-		 * needs to have its queues adjusted and AST/BASTs 
+		 * needs to have its queues adjusted and AST/BASTs
 		 * run.  So let's pull each entry off the dirty_list
 		 * and drop dlm->spinlock ASAP.  Once off the list,
-		 * res->spinlock needs to be taken again to protect 
+		 * res->spinlock needs to be taken again to protect
 		 * the queues while calling dlm_shuffle_lists.  */
 		spin_lock(&dlm->spinlock);
 		while (!list_empty(&dlm->dirty_list)) {
 			int delay = 0;
-			res = list_entry(dlm->dirty_list.next, 
-					 dlm_lock_resource, dirty);
-		
+			res = list_entry(dlm->dirty_list.next,
+					 struct dlm_lock_resource, dirty);
+	
 			/* peel a lockres off, remove it from the list,
 			 * unset the dirty flag and drop the dlm lock */
 			BUG_ON(!res);
 			dlm_lockres_get(res);
-			
+		
 			spin_lock(&res->spinlock);
 			res->state &= ~DLM_LOCK_RES_DIRTY;
 			list_del_init(&res->dirty);
 			spin_unlock(&res->spinlock);
 			spin_unlock(&dlm->spinlock);
 
-		 	/* lockres can be re-dirtied/re-added to the 
+		 	/* lockres can be re-dirtied/re-added to the
 			 * dirty_list in this gap, but that is ok */
 
 			spin_lock(&res->spinlock);
 			BUG_ON(res->owner != dlm->node_num);
-			
+		
 			/* it is now ok to move lockreses in these states
 			 * to the dirty list, assuming that they will only be
 			 * dirty for a short while. */
@@ -664,17 +665,17 @@
 			 * recovering/in-progress.  we have the lockres
 			 * spinlock and do NOT have the dlm lock.
 			 * safe to reserve/queue asts and run the lists. */
-		
+	
 			mlog(0, "calling dlm_shuffle_lists with dlm=%p, "
 			     "res=%p\n", dlm, res);
-		
+	
 			/* called while holding lockres lock */
 			dlm_shuffle_lists(dlm, res);
 			spin_unlock(&res->spinlock);
 
 			dlm_lockres_calc_usage(dlm, res);
 
-in_progress:	
+in_progress:
 
 			spin_lock(&dlm->spinlock);
 			/* if the lock was in-progress, stick
@@ -686,7 +687,7 @@
 				spin_unlock(&res->spinlock);
 			}
 			dlm_lockres_put(res);
-			
+		
 			/* unlikely, but we may need to give time to
 			 * other tasks */
 			if (!--n) {
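
For reference, the dirty-list loop that these dlmthread.c hunks reflow keeps
a strict lock order: a lockres is unlinked while both dlm->spinlock and
res->spinlock are held, the dlm-wide lock is dropped as early as possible,
and only res->spinlock is re-taken around dlm_shuffle_lists().  A condensed
sketch of that order (recovery/in-progress checks and the re-dirty path
elided):

	spin_lock(&dlm->spinlock);
	while (!list_empty(&dlm->dirty_list)) {
		res = list_entry(dlm->dirty_list.next,
				 struct dlm_lock_resource, dirty);
		dlm_lockres_get(res);

		/* unlink under both locks, then drop dlm->spinlock ASAP */
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_DIRTY;
		list_del_init(&res->dirty);
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);

		/* res->spinlock alone protects the queues while shuffling */
		spin_lock(&res->spinlock);
		dlm_shuffle_lists(dlm, res);
		spin_unlock(&res->spinlock);

		dlm_lockres_calc_usage(dlm, res);

		spin_lock(&dlm->spinlock);
		dlm_lockres_put(res);
	}
	spin_unlock(&dlm->spinlock);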

Modified: trunk/fs/ocfs2/dlm/dlmunlock.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmunlock.c	2005-06-29 22:47:38 UTC (rev 2447)
+++ trunk/fs/ocfs2/dlm/dlmunlock.c	2005-06-30 00:30:29 UTC (rev 2448)
@@ -57,35 +57,39 @@
 #define DLM_UNLOCK_CLEAR_CONVERT_TYPE  0x00000010
 
 
-static dlm_status dlm_get_cancel_actions(dlm_ctxt *dlm, dlm_lock_resource *res, 
-					 dlm_lock *lock, dlm_lockstatus *lksb, 
-					 int *actions);
-static dlm_status dlm_get_unlock_actions(dlm_ctxt *dlm, dlm_lock_resource *res,
-					 dlm_lock *lock, dlm_lockstatus *lksb, 
-					 int *actions);
+static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm,
+					      struct dlm_lock_resource *res,
+					      struct dlm_lock *lock,
+					      struct dlm_lockstatus *lksb,
+					      int *actions);
+static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm,
+					      struct dlm_lock_resource *res,
+					      struct dlm_lock *lock,
+					      struct dlm_lockstatus *lksb,
+					      int *actions);
 
-static dlm_status dlm_send_remote_unlock_request(dlm_ctxt *dlm, 
-						 dlm_lock_resource *res, 
-						 dlm_lock *lock, 
-						 dlm_lockstatus *lksb, 
+static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
+						 struct dlm_lock_resource *res,
+						 struct dlm_lock *lock,
+						 struct dlm_lockstatus *lksb,
 						 int flags,
 						 u8 owner);
 
 
-/* 
+/*
  * according to the spec:
  * http://opendlm.sourceforge.net/cvsmirror/opendlm/docs/dlmbook_final.pdf
  *
  *  flags & LKM_CANCEL != 0: must be converting or blocked
  *  flags & LKM_CANCEL == 0: must be granted
  *
- * So to unlock a converting lock, you must first cancel the 
- * convert (passing LKM_CANCEL in flags), then call the unlock 
+ * So to unlock a converting lock, you must first cancel the
+ * convert (passing LKM_CANCEL in flags), then call the unlock
  * again (with no LKM_CANCEL in flags).
  */
 
 
-/* 
+/*
  * locking:
  *   caller needs:  none
  *   taken:         res->spinlock and lock->spinlock taken and dropped
@@ -93,11 +97,14 @@
  * returns: DLM_NORMAL, DLM_NOLOCKMGR, status from network
  * all callers should have taken an extra ref on lock coming in
  */
-static dlm_status dlmunlock_common(dlm_ctxt *dlm, dlm_lock_resource *res, 
-				   dlm_lock *lock, dlm_lockstatus *lksb, 
-				   int flags, int *call_ast, int master_node)
+static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
+					struct dlm_lock_resource *res,
+					struct dlm_lock *lock,
+					struct dlm_lockstatus *lksb,
+					int flags, int *call_ast,
+					int master_node)
 {
-	dlm_status status;
+	enum dlm_status status;
 	int actions = 0;
 	int in_use;
         u8 owner;
@@ -153,9 +160,9 @@
 
 	/* By now this has been masked out of cancel requests. */
 	if (flags & LKM_VALBLK) {
-		/* make the final update to the lvb */		
+		/* make the final update to the lvb */	
 		if (master_node)
-			memcpy(res->lvb, lksb->lvb, DLM_LVB_LEN); 
+			memcpy(res->lvb, lksb->lvb, DLM_LVB_LEN);
 		else
 			flags |= LKM_PUT_LVB; /* let the send function
 					       * handle it. */
@@ -181,7 +188,7 @@
 
 	}
 
-	/* get an extra ref on lock.  if we are just switching 
+	/* get an extra ref on lock.  if we are just switching
 	 * lists here, we dont want the lock to go away. */
 	dlm_lock_get(lock);
 
@@ -198,7 +205,7 @@
 		     master_node ? "" : "non-");
 		lock->ml.convert_type = LKM_IVMODE;
 	}
-	
+
 	/* remove the extra ref on lock */
 	dlm_lock_put(lock);
 
@@ -206,7 +213,7 @@
 	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
 	if (!dlm_lock_on_list(&res->converting, lock))
 		BUG_ON(lock->ml.convert_type != LKM_IVMODE);
-	else 
+	else
 		BUG_ON(lock->ml.convert_type == LKM_IVMODE);
 	spin_unlock(&lock->spinlock);
 	spin_unlock(&res->spinlock);
@@ -230,14 +237,16 @@
 	return status;
 }
 
-void dlm_commit_pending_unlock(dlm_lock_resource *res, dlm_lock *lock)
+void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
+			       struct dlm_lock *lock)
 {
 	/* leave DLM_LKSB_PUT_LVB on the lksb so any final
 	 * update of the lvb will be sent to the new master */
 	list_del_init(&lock->list);
 }
 
-void dlm_commit_pending_cancel(dlm_lock_resource *res, dlm_lock *lock)
+void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
+			       struct dlm_lock *lock)
 {
 	list_del_init(&lock->list);
 	list_add_tail(&lock->list, &res->granted);
@@ -245,42 +254,42 @@
 }
 
 
-static inline dlm_status dlmunlock_master(dlm_ctxt *dlm,
-					  dlm_lock_resource *res,
-					  dlm_lock *lock,
-					  dlm_lockstatus *lksb,
+static inline enum dlm_status dlmunlock_master(struct dlm_ctxt *dlm,
+					  struct dlm_lock_resource *res,
+					  struct dlm_lock *lock,
+					  struct dlm_lockstatus *lksb,
 					  int flags,
 					  int *call_ast)
 {
 	return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 1);
 }
 
-static inline dlm_status dlmunlock_remote(dlm_ctxt *dlm,
-					  dlm_lock_resource *res,
-					  dlm_lock *lock,
-					  dlm_lockstatus *lksb,
+static inline enum dlm_status dlmunlock_remote(struct dlm_ctxt *dlm,
+					  struct dlm_lock_resource *res,
+					  struct dlm_lock *lock,
+					  struct dlm_lockstatus *lksb,
 					  int flags, int *call_ast)
 {
 	return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 0);
 }
 
-/* 
+/*
  * locking:
  *   caller needs:  none
  *   taken:         none
  *   held on exit:  none
  * returns: DLM_NORMAL, DLM_NOLOCKMGR, status from network
  */
-static dlm_status dlm_send_remote_unlock_request(dlm_ctxt *dlm, 
-						 dlm_lock_resource *res, 
-						 dlm_lock *lock, 
-						 dlm_lockstatus *lksb, 
+static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
+						 struct dlm_lock_resource *res,
+						 struct dlm_lock *lock,
+						 struct dlm_lockstatus *lksb,
 						 int flags,
 						 u8 owner)
 {
-	dlm_unlock_lock unlock;
+	struct dlm_unlock_lock unlock;
 	int tmpret;
-	dlm_status ret;
+	enum dlm_status ret;
 	int status = 0;
 	struct iovec iov[2];
 	size_t iovlen = 1;
@@ -294,7 +303,7 @@
 	unlock.namelen = res->lockname.len;
 	memcpy(unlock.name, res->lockname.name, unlock.namelen);
 
-	iov[0].iov_len = sizeof(dlm_unlock_lock);
+	iov[0].iov_len = sizeof(struct dlm_unlock_lock);
 	iov[0].iov_base = &unlock;
 
 	if (flags & LKM_PUT_LVB) {
@@ -305,7 +314,7 @@
 	}
 
 	dlm_unlock_lock_to_net(&unlock);
-	tmpret = o2net_send_message_iov(DLM_UNLOCK_LOCK_MSG, dlm->key, 
+	tmpret = o2net_send_message_iov(DLM_UNLOCK_LOCK_MSG, dlm->key,
 					iov, iovlen, owner, &status);
 	if (tmpret >= 0) {
 		// successfully sent and received
@@ -321,9 +330,9 @@
 		mlog_errno(tmpret);
 		if (dlm_is_host_down(tmpret)) {
 			/* NOTE: this seems strange, but it is what we want.
-			 * when the master goes down during a cancel or 
+			 * when the master goes down during a cancel or
 			 * unlock, the recovery code completes the operation
-			 * as if the master had not died, then passes the 
+			 * as if the master had not died, then passes the
 			 * updated state to the recovery master.  this thread
 			 * just needs to finish out the operation and call
 			 * the unlockast. */
@@ -338,28 +347,28 @@
 	return ret;
 }
 
-/* 
+/*
  * locking:
  *   caller needs:  none
  *   taken:         takes and drops res->spinlock
  *   held on exit:  none
- * returns: DLM_NORMAL, DLM_BADARGS, DLM_IVLOCKID, 
+ * returns: DLM_NORMAL, DLM_BADARGS, DLM_IVLOCKID,
  *          return value from dlmunlock_master
  */
 int dlm_unlock_lock_handler(o2net_msg *msg, u32 len, void *data)
 {
-	dlm_ctxt *dlm = data;
-	dlm_unlock_lock *unlock = (dlm_unlock_lock *)msg->buf;
-	dlm_lock_resource *res = NULL;
+	struct dlm_ctxt *dlm = data;
+	struct dlm_unlock_lock *unlock = (struct dlm_unlock_lock *)msg->buf;
+	struct dlm_lock_resource *res = NULL;
 	struct list_head *iter;
-	dlm_lock *lock = NULL;
-	dlm_status status = DLM_NORMAL;
+	struct dlm_lock *lock = NULL;
+	enum dlm_status status = DLM_NORMAL;
 	int found = 0, i;
-	dlm_lockstatus *lksb = NULL;
+	struct dlm_lockstatus *lksb = NULL;
 	int ignore;
 	u32 flags;
 	struct list_head *queue;
- 
+
 	dlm_unlock_lock_to_host(unlock);
 	flags = unlock->flags;
 
@@ -382,7 +391,7 @@
 	if (!dlm_grab(dlm))
 		return DLM_REJECTED;
 
-	mlog_bug_on_msg(!dlm_domain_fully_joined(dlm), 
+	mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
 			"Domain %s not fully joined!\n", dlm->name);
 
 	mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : "none");
@@ -423,7 +432,7 @@
 
 	for (i=0; i<3; i++) {
 		list_for_each(iter, queue) {
-			lock = list_entry(iter, dlm_lock, list);
+			lock = list_entry(iter, struct dlm_lock, list);
 			if (lock->ml.cookie == unlock->cookie &&
 		    	    lock->ml.node == unlock->node_idx) {
 				dlm_lock_get(lock);
@@ -482,11 +491,13 @@
 }
 
 
-static dlm_status dlm_get_cancel_actions(dlm_ctxt *dlm, dlm_lock_resource *res, 
-					 dlm_lock *lock, dlm_lockstatus *lksb, 
-					 int *actions)
+static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm,
+					      struct dlm_lock_resource *res,
+					      struct dlm_lock *lock,
+					      struct dlm_lockstatus *lksb,
+					      int *actions)
 {
-	dlm_status status;
+	enum dlm_status status;
 
 	if (dlm_lock_on_list(&res->blocked, lock)) {
 		/* cancel this outright */
@@ -516,11 +527,13 @@
 	return status;
 }
 
-static dlm_status dlm_get_unlock_actions(dlm_ctxt *dlm, dlm_lock_resource *res,
-					 dlm_lock *lock, dlm_lockstatus *lksb, 
-					 int *actions)
+static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm,
+					      struct dlm_lock_resource *res,
+					      struct dlm_lock *lock,
+					      struct dlm_lockstatus *lksb,
+					      int *actions)
 {
-	dlm_status status;
+	enum dlm_status status;
 
 	/* unlock request */
 	if (!dlm_lock_on_list(&res->granted, lock)) {
@@ -542,12 +555,12 @@
  * since (even for the remote case) there is really
  * no work to queue up... so just do it and fire the
  * unlockast by hand when done... */
-dlm_status dlmunlock(dlm_ctxt *dlm, dlm_lockstatus *lksb, int flags, 
-		     dlm_astunlockfunc_t *unlockast, void *data)
+enum dlm_status dlmunlock(struct dlm_ctxt *dlm, struct dlm_lockstatus *lksb,
+			  int flags, dlm_astunlockfunc_t *unlockast, void *data)
 {
-	dlm_status status;
-	dlm_lock_resource *res;
-	dlm_lock *lock = NULL;
+	enum dlm_status status;
+	struct dlm_lock_resource *res;
+	struct dlm_lock *lock = NULL;
 	int call_ast, is_master;
 
 	mlog_entry_void();
@@ -583,12 +596,12 @@
 	spin_unlock(&res->spinlock);
 
 	if (is_master) {
-		status = dlmunlock_master(dlm, res, lock, lksb, flags, 
+		status = dlmunlock_master(dlm, res, lock, lksb, flags,
 					  &call_ast);
 		mlog(0, "done calling dlmunlock_master: returned %d, "
 		     "call_ast is %d\n", status, call_ast);
 	} else {
-		status = dlmunlock_remote(dlm, res, lock, lksb, flags, 
+		status = dlmunlock_remote(dlm, res, lock, lksb, flags,
 					  &call_ast);
 		mlog(0, "done calling dlmunlock_remote: returned %d, "
 		     "call_ast is %d\n", status, call_ast);

Modified: trunk/fs/ocfs2/dlm/userdlm.c
===================================================================
--- trunk/fs/ocfs2/dlm/userdlm.c	2005-06-29 22:47:38 UTC (rev 2447)
+++ trunk/fs/ocfs2/dlm/userdlm.c	2005-06-30 00:30:29 UTC (rev 2448)
@@ -73,7 +73,7 @@
 }
 
 /* I heart container_of... */
-static inline dlm_ctxt *
+static inline struct dlm_ctxt *
 dlm_ctxt_from_user_lockres(struct user_lock_res *lockres)
 {
 	struct dlmfs_inode_private *ip;
@@ -125,7 +125,7 @@
 static void user_ast(void *opaque)
 {
 	struct user_lock_res *lockres = opaque;
-	dlm_lockstatus *lksb;
+	struct dlm_lockstatus *lksb;
 
 	mlog(0, "AST fired for lockres %s\n", lockres->l_name);
 
@@ -223,7 +223,7 @@
 	wake_up(&lockres->l_event);
 }
 
-static void user_unlock_ast(void *opaque, dlm_status status)
+static void user_unlock_ast(void *opaque, enum dlm_status status)
 {
 	struct user_lock_res *lockres = opaque;
 
@@ -262,7 +262,7 @@
 {
 	int new_level, status;
 	struct user_lock_res *lockres = (struct user_lock_res *) opaque;
-	dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
+	struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
 
 	mlog(0, "processing lockres %s\n", lockres->l_name);
 
@@ -371,7 +371,7 @@
 /* predict what lock level we'll be dropping down to on behalf
  * of another node, and return true if the currently wanted
  * level will be compatible with it. */
-static inline int 
+static inline int
 user_may_continue_on_blocked_lock(struct user_lock_res *lockres,
 				  int wanted)
 {
@@ -385,7 +385,7 @@
 			  int lkm_flags)
 {
 	int status, local_flags;
-	dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
+	struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
 
 	if (level != LKM_EXMODE &&
 	    level != LKM_PRMODE) {
@@ -569,14 +569,14 @@
 	BUG_ON(dentry->d_name.len >= USER_DLM_LOCK_ID_MAX_LEN);
 
 	memcpy(lockres->l_name,
-	       dentry->d_name.name, 
+	       dentry->d_name.name,
 	       dentry->d_name.len);
 }
 
 int user_dlm_destroy_lock(struct user_lock_res *lockres)
 {
 	int status = -EBUSY;
-	dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
+	struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
 
 	mlog(0, "asked to destroy %s\n", lockres->l_name);
 
@@ -628,9 +628,9 @@
 	return status;
 }
 
-dlm_ctxt *user_dlm_register_context(struct qstr *name)
+struct dlm_ctxt *user_dlm_register_context(struct qstr *name)
 {
-	dlm_ctxt *dlm;
+	struct dlm_ctxt *dlm;
 	u32 dlm_key;
 	char *domain;
 
@@ -650,7 +650,7 @@
 	return dlm;
 }
 
-void user_dlm_unregister_context(dlm_ctxt *dlm)
+void user_dlm_unregister_context(struct dlm_ctxt *dlm)
 {
 	dlm_unregister_domain(dlm);
 }
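
The dlm_ctxt_from_user_lockres() helper retyped above is the container_of idiom the nearby comment celebrates. Its body falls outside these hunks; given the dlmfs_inode_private layout in userdlm.h below, it presumably reads roughly like this sketch:

	static inline struct dlm_ctxt *
	dlm_ctxt_from_user_lockres(struct user_lock_res *lockres)
	{
		struct dlmfs_inode_private *ip;

		/* container_of() steps back from a pointer to an embedded
		 * member to the structure embedding it, using the
		 * compile-time offset of that member */
		ip = container_of(lockres, struct dlmfs_inode_private,
				  ip_lockres);
		return ip->ip_dlm;
	}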

Modified: trunk/fs/ocfs2/dlm/userdlm.h
===================================================================
--- trunk/fs/ocfs2/dlm/userdlm.h	2005-06-29 22:47:38 UTC (rev 2447)
+++ trunk/fs/ocfs2/dlm/userdlm.h	2005-06-30 00:30:29 UTC (rev 2448)
@@ -56,7 +56,7 @@
 	int                      l_level;
 	unsigned int             l_ro_holders;
 	unsigned int             l_ex_holders;
-	dlm_lockstatus           l_lksb;
+	struct dlm_lockstatus    l_lksb;
 
 	int                      l_requested;
 	int                      l_blocking;
@@ -82,11 +82,11 @@
 void user_dlm_read_lvb(struct inode *inode,
 		       char *val,
 		       unsigned int len);
-dlm_ctxt *user_dlm_register_context(struct qstr *name);
-void user_dlm_unregister_context(dlm_ctxt *dlm);
+struct dlm_ctxt *user_dlm_register_context(struct qstr *name);
+void user_dlm_unregister_context(struct dlm_ctxt *dlm);
 
 struct dlmfs_inode_private {
-	dlm_ctxt             *ip_dlm;
+	struct dlm_ctxt             *ip_dlm;
 
 	struct user_lock_res ip_lockres; /* unused for directories. */
 	struct inode         *ip_parent;
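
One practical effect of the plain struct tags shows up in headers like this one: a pointer member compiles against a bare forward declaration, while an embedded member still needs the complete definition in scope. Schematically (the member names here are illustrative):

	struct dlm_ctxt;			/* tag alone suffices for pointers */

	struct example_private {
		struct dlm_ctxt *dlm;		/* pointer: sizeof the pointee is
						   never needed, no include required */
		struct dlm_lockstatus lksb;	/* embedded: the compiler needs the
						   complete type, so dlmapi.h must
						   be included before this point */
	};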

Modified: trunk/fs/ocfs2/dlmglue.c
===================================================================
--- trunk/fs/ocfs2/dlmglue.c	2005-06-29 22:47:38 UTC (rev 2447)
+++ trunk/fs/ocfs2/dlmglue.c	2005-06-30 00:30:29 UTC (rev 2448)
@@ -68,7 +68,7 @@
 
 /* so far, all locks have gotten along with the same unlock ast */
 static void ocfs2_unlock_ast_func(void *opaque,
-				  dlm_status status);
+				  enum dlm_status status);
 static int ocfs2_do_unblock_meta(struct inode *inode,
 				 int *requeue);
 static int ocfs2_unblock_meta(struct ocfs2_lock_res *lockres,
@@ -86,7 +86,7 @@
 struct ocfs2_lock_res_ops {
 	void (*ast)(void *);
 	void (*bast)(void *, int);
-	void (*unlock_ast)(void *, dlm_status);
+	void (*unlock_ast)(void *, enum dlm_status);
 	int  (*unblock)(struct ocfs2_lock_res *, int *);
 };
 
@@ -543,7 +543,7 @@
 {
 	struct ocfs2_lock_res *lockres = opaque;
 	struct inode *inode;
-	dlm_lockstatus *lksb;
+	struct dlm_lockstatus *lksb;
 
 	mlog_entry_void();
 
@@ -665,7 +665,7 @@
 
 static void ocfs2_generic_ast_func(struct ocfs2_lock_res *lockres)
 {
-	dlm_lockstatus *lksb = &lockres->l_lksb;
+	struct dlm_lockstatus *lksb = &lockres->l_lksb;
 
 	spin_lock(&lockres->l_lock);
 
@@ -811,7 +811,7 @@
 			     int flags)
 {
 	int ret = 0;
-	dlm_status status;
+	enum dlm_status status;
 
 	mlog_entry_void();
 
@@ -933,7 +933,7 @@
 {
 	struct ocfs2_lockres_flag_callback _fcb, *fcb = &_fcb;
 	struct ocfs2_status_completion sc;
-	dlm_status status;
+	enum dlm_status status;
 	int ret;
 	int catch_signals = 1;
 
@@ -1745,7 +1745,7 @@
 {
 	int status;
 	u32 dlm_key;
-	dlm_ctxt *dlm = NULL;
+	struct dlm_ctxt *dlm = NULL;
 
 	mlog_entry_void();
 
@@ -1808,7 +1808,7 @@
 	mlog_exit_void();
 }
 
-static void ocfs2_unlock_ast_func(void *opaque, dlm_status status)
+static void ocfs2_unlock_ast_func(void *opaque, enum dlm_status status)
 {
 	struct ocfs2_lock_res *lockres = opaque;
 
@@ -1848,7 +1848,7 @@
 			     struct ocfs2_lock_res *lockres)
 {
 	int ret = 0;
-	dlm_status status;
+	enum dlm_status status;
 
 	if (lockres->l_flags & OCFS2_LOCK_BUSY)
 		mlog(ML_ERROR, "destroying busy lock: \"%s\"\n",
@@ -2000,7 +2000,7 @@
 				    int lvb)
 {
 	int ret, flags = LKM_CONVERT;
-	dlm_status status;
+	enum dlm_status status;
 
 	mlog_entry_void();
 
@@ -2049,7 +2049,7 @@
 				  struct ocfs2_lock_res *lockres)
 {
 	int ret;
-	dlm_status status;
+	enum dlm_status status;
 
 	mlog_entry_void();
 
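
The ocfs2_lock_res_ops table whose unlock_ast member is retyped above is a small vtable: each lock type supplies its AST, BAST and unlock-AST callbacks plus a per-type unblock action. A sketch of how such a table is wired up; the example_ names are illustrative, not from the tree:

	static void example_ast(void *opaque)
	{
		/* request granted; opaque is the lock resource */
	}

	static void example_bast(void *opaque, int level)
	{
		/* another node wants a conflicting lock at 'level' */
	}

	static void example_unlock_ast(void *opaque, enum dlm_status status)
	{
		/* the unlock or cancel finished with 'status' */
	}

	static int example_unblock(struct ocfs2_lock_res *lockres, int *requeue)
	{
		/* downconvert as far as the blocking request demands */
		return 0;
	}

	static struct ocfs2_lock_res_ops example_lops = {
		.ast        = example_ast,
		.bast       = example_bast,
		.unlock_ast = example_unlock_ast,
		.unblock    = example_unblock,
	};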

Modified: trunk/fs/ocfs2/ocfs2.h
===================================================================
--- trunk/fs/ocfs2/ocfs2.h	2005-06-29 22:47:38 UTC (rev 2447)
+++ trunk/fs/ocfs2/ocfs2.h	2005-06-30 00:30:29 UTC (rev 2448)
@@ -127,7 +127,7 @@
 	int                      l_level;
 	unsigned int             l_ro_holders;
 	unsigned int             l_ex_holders;
-	dlm_lockstatus           l_lksb;
+	struct dlm_lockstatus    l_lksb;
 	u32                      l_local_seq;
 
 	/* used from AST/BAST funcs. */
@@ -239,7 +239,7 @@
 	struct ocfs2_alloc_stats alloc_stats;
 	char dev_str[20];		/* "major,minor" of the device */
 
-	dlm_ctxt *dlm;
+	struct dlm_ctxt *dlm;
 	struct ocfs2_lock_res osb_super_lockres;
 	struct ocfs2_lock_res osb_rename_lockres;
 	struct dlm_eviction_cb osb_eviction_cb;
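
Every hunk above is an instance of the same mechanical conversion; schematically, with an illustrative name:

	/* before: the tag hidden behind a typedef */
	struct _dlm_foo;
	typedef struct _dlm_foo dlm_foo;
	dlm_foo *a;

	/* after: the plain tag, and likewise 'enum dlm_status'
	 * in place of the old typedef'd enum */
	struct dlm_foo;
	struct dlm_foo *b;

Spelling out struct or enum at every use site makes it obvious at a glance what kind of object is being handled, which is the reason Documentation/CodingStyle discourages typedefs for them.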


