[Ocfs2-commits] khackel commits r2853 - branches/ocfs2-1.2-cert/patches
svn-commits@oss.oracle.com
svn-commits at oss.oracle.com
Wed Apr 19 17:02:59 CDT 2006
Author: khackel
Signed-off-by: jlbec
Date: 2006-04-19 17:02:49 -0500 (Wed, 19 Apr 2006)
New Revision: 2853
Added:
branches/ocfs2-1.2-cert/patches/bad-lock-during-convert-debug-msg.patch
branches/ocfs2-1.2-cert/patches/change-yield-to-cond_resched.patch
branches/ocfs2-1.2-cert/patches/dlm-calc-usage-warning.patch
branches/ocfs2-1.2-cert/patches/dlm-cancel-assert-during-migrate-notice.patch
branches/ocfs2-1.2-cert/patches/dlm-check-recovery-flag-before-convert.patch
branches/ocfs2-1.2-cert/patches/dlm-lvb-debug-pretty-print.patch
branches/ocfs2-1.2-cert/patches/dlm-more-time-needed-for-hb-detection.patch
branches/ocfs2-1.2-cert/patches/dlm-quiet-noisy-mastery-reco-msgs.patch
branches/ocfs2-1.2-cert/patches/dlm-restart-mastery-debug-msgs.patch
branches/ocfs2-1.2-cert/patches/dlm-wait-for-recovery-msgs.patch
branches/ocfs2-1.2-cert/patches/enomem-on-other-reco-node-warning.patch
branches/ocfs2-1.2-cert/patches/fix-trailing-whitespace-1.patch
branches/ocfs2-1.2-cert/patches/fix-wait-for-recovery.patch
branches/ocfs2-1.2-cert/patches/remote-lock-during-reco-msgs.patch
Modified:
branches/ocfs2-1.2-cert/patches/continue-finalize-reco.patch
branches/ocfs2-1.2-cert/patches/debug-already-on-reco-list.patch
branches/ocfs2-1.2-cert/patches/debug-mastery.patch
branches/ocfs2-1.2-cert/patches/dlm-add-migration-delay.patch
branches/ocfs2-1.2-cert/patches/dlm-allow-for-assert-while-in-progress.patch
branches/ocfs2-1.2-cert/patches/dlm-allow-recovery-to-start.patch
branches/ocfs2-1.2-cert/patches/dlm-catch-bad-reco-state.patch
branches/ocfs2-1.2-cert/patches/dlm-cleanup-reco-state-changes.patch
branches/ocfs2-1.2-cert/patches/dlm-debug-bad-migrated-lvb.patch
branches/ocfs2-1.2-cert/patches/dlm-detach-from-hb-events.patch
branches/ocfs2-1.2-cert/patches/dlm-eloop.patch
branches/ocfs2-1.2-cert/patches/dlm-fix-dlm_is_node_dead.patch
branches/ocfs2-1.2-cert/patches/dlm-fix-lvb-empty-check.patch
branches/ocfs2-1.2-cert/patches/dlm-fix-migration-asserts.patch
branches/ocfs2-1.2-cert/patches/dlm-fix-mle-refcount-while-inuse.patch
branches/ocfs2-1.2-cert/patches/dlm-fix_dlm_lock_reco_handling.patch
branches/ocfs2-1.2-cert/patches/dlm-handle-reco-network-errors.patch
branches/ocfs2-1.2-cert/patches/dlm-init-mle-when-used.patch
branches/ocfs2-1.2-cert/patches/dlm-mastery-debug-mlogs.patch
branches/ocfs2-1.2-cert/patches/dlm-mle-debugging.patch
branches/ocfs2-1.2-cert/patches/dlm-mlog_to_printk
branches/ocfs2-1.2-cert/patches/dlm-new_proc_entry
branches/ocfs2-1.2-cert/patches/dlm-recheck-master-on-unlock.patch
branches/ocfs2-1.2-cert/patches/dlm-reco-debug-mlogs.patch
branches/ocfs2-1.2-cert/patches/dlm-recovery-stage-mlogs.patch
branches/ocfs2-1.2-cert/patches/dlm-replace_gfp_kernel_with_nofs
branches/ocfs2-1.2-cert/patches/em-silence-eexist.patch
branches/ocfs2-1.2-cert/patches/fix-death-during-recovery.patch
branches/ocfs2-1.2-cert/patches/fix-dlmlock_remote.patch
branches/ocfs2-1.2-cert/patches/fix-purge-lockres.patch
branches/ocfs2-1.2-cert/patches/fix-recovery-spin.patch
branches/ocfs2-1.2-cert/patches/fix-remote-lock-during-reco.patch
branches/ocfs2-1.2-cert/patches/fix-wait-in-mastery.patch
branches/ocfs2-1.2-cert/patches/hb-add_tracking_around_configured_nodes
branches/ocfs2-1.2-cert/patches/hold-dirty-ref.patch
branches/ocfs2-1.2-cert/patches/hold-recovery-ref.patch
branches/ocfs2-1.2-cert/patches/jrnl-change_gfp_kernel_to_nofs
branches/ocfs2-1.2-cert/patches/lockres-release-info.patch
branches/ocfs2-1.2-cert/patches/lvb-recovery-fix.patch
branches/ocfs2-1.2-cert/patches/mar24-create-lock-handler.patch
branches/ocfs2-1.2-cert/patches/mastery-restart-recovery.patch
branches/ocfs2-1.2-cert/patches/move-dlm-work-to-thread.patch
branches/ocfs2-1.2-cert/patches/never-purge-master.patch
branches/ocfs2-1.2-cert/patches/ocfs2-1.2-no-idr-0.patch
branches/ocfs2-1.2-cert/patches/ocfs2-extend_tracing2.patch
branches/ocfs2-1.2-cert/patches/ocfs2-journal_start_stop_msgs.patch
branches/ocfs2-1.2-cert/patches/ocfs2-reco_nofs.patch
branches/ocfs2-1.2-cert/patches/ocfs2_dlm-do_lvb_puts_inline2.patch
branches/ocfs2-1.2-cert/patches/ocfs2_heartbeat-better_I_O_error_handling.patch
branches/ocfs2-1.2-cert/patches/reassert-vs-migration.patch
branches/ocfs2-1.2-cert/patches/remove-bad-spin-unlock.patch
branches/ocfs2-1.2-cert/patches/series
branches/ocfs2-1.2-cert/patches/two-stage-finalize.patch
Log:
break out remaining patches
Signed-off-by: jlbec
Added: branches/ocfs2-1.2-cert/patches/bad-lock-during-convert-debug-msg.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/bad-lock-during-convert-debug-msg.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/bad-lock-during-convert-debug-msg.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -0,0 +1,13 @@
+Index: cert3/fs/ocfs2/dlm/dlmconvert.c
+===================================================================
+--- cert3.orig/fs/ocfs2/dlm/dlmconvert.c 2006-04-19 14:50:16.975963000 -0700
++++ cert3/fs/ocfs2/dlm/dlmconvert.c 2006-04-19 14:50:19.146319000 -0700
+@@ -487,7 +487,7 @@ int dlm_convert_lock_handler(struct o2ne
+ list_for_each(iter, &res->granted) {
+ lock = list_entry(iter, struct dlm_lock, list);
+ if (lock->ml.node == cnv->node_idx) {
+- mlog(0, "ahaha there is something here "
++ mlog(ML_ERROR, "ahaha there is something here "
+ "for node %u, lock->ml.cookie=%llu, "
+ "cnv->cookie=%llu\n", cnv->node_idx,
+ lock->ml.cookie, cnv->cookie);
Added: branches/ocfs2-1.2-cert/patches/change-yield-to-cond_resched.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/change-yield-to-cond_resched.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/change-yield-to-cond_resched.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -0,0 +1,13 @@
+Index: cert3/fs/ocfs2/dlm/dlmthread.c
+===================================================================
+--- cert3.orig/fs/ocfs2/dlm/dlmthread.c 2006-04-19 14:49:35.477211000 -0700
++++ cert3/fs/ocfs2/dlm/dlmthread.c 2006-04-19 14:50:10.153217000 -0700
+@@ -750,7 +750,7 @@ in_progress:
+
+ /* yield and continue right away if there is more work to do */
+ if (!n) {
+- yield();
++ cond_resched();
+ continue;
+ }
+
Modified: branches/ocfs2-1.2-cert/patches/continue-finalize-reco.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/continue-finalize-reco.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/continue-finalize-reco.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmrecovery.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 12:15:37.440041000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 12:15:40.321072000 -0700
-@@ -2651,6 +2651,7 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:49:55.629726000 -0700
++++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:49:57.748081000 -0700
+@@ -2650,6 +2650,7 @@ stage2:
mlog(ML_ERROR, "node %u went down after this "
"node finished recovery.\n", nodenum);
ret = 0;
Modified: branches/ocfs2-1.2-cert/patches/debug-already-on-reco-list.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/debug-already-on-reco-list.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/debug-already-on-reco-list.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmrecovery.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-18 14:51:31.000000000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-18 14:52:33.853317000 -0700
-@@ -1783,7 +1783,7 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:48:55.689045000 -0700
++++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:48:58.026401000 -0700
+@@ -1783,7 +1783,7 @@ void dlm_move_lockres_to_recovery_list(s
res->state |= DLM_LOCK_RES_RECOVERING;
if (!list_empty(&res->recovering)) {
Modified: branches/ocfs2-1.2-cert/patches/debug-mastery.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/debug-mastery.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/debug-mastery.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmmaster.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-18 14:47:43.682671000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-18 14:49:41.605792000 -0700
-@@ -1629,6 +1629,8 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:48:51.523333000 -0700
++++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:48:53.584689000 -0700
+@@ -1629,6 +1629,8 @@ again:
dlm_node_iter_init(nodemap, &iter);
while ((to = dlm_node_iter_next(&iter)) >= 0) {
int r = 0;
@@ -11,7 +11,7 @@
mlog(0, "sending assert master to %d (%.*s)\n", to,
namelen, lockname);
memset(&assert, 0, sizeof(assert));
-@@ -1653,7 +1655,15 @@
+@@ -1653,7 +1655,15 @@ again:
/* ok, something horribly messed. kill thyself. */
mlog(ML_ERROR,"during assert master of %.*s to %u, "
"got %d.\n", namelen, lockname, to, r);
@@ -28,7 +28,7 @@
BUG();
} else if (r == EAGAIN) {
mlog(0, "%.*s: node %u create mles on other "
-@@ -1917,12 +1927,12 @@
+@@ -1917,12 +1927,12 @@ done:
kill:
/* kill the caller! */
Modified: branches/ocfs2-1.2-cert/patches/dlm-add-migration-delay.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-add-migration-delay.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-add-migration-delay.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,7 +1,7 @@
Index: cert3/fs/ocfs2/dlm/dlmthread.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmthread.c 2006-04-17 16:06:38.000000000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmthread.c 2006-04-17 16:23:51.218905000 -0700
+--- cert3.orig/fs/ocfs2/dlm/dlmthread.c 2006-04-19 14:46:51.470976000 -0700
++++ cert3/fs/ocfs2/dlm/dlmthread.c 2006-04-19 14:48:06.522823000 -0700
@@ -39,6 +39,7 @@
#include <linux/inet.h>
#include <linux/timer.h>
@@ -10,7 +10,7 @@
#include "cluster/heartbeat.h"
-@@ -166,6 +167,7 @@
+@@ -166,6 +167,7 @@ again:
} else if (ret < 0) {
mlog(ML_NOTICE, "lockres %.*s: migrate failed, retrying\n",
lockres->lockname.len, lockres->lockname.name);
@@ -18,7 +18,7 @@
goto again;
}
-@@ -658,8 +660,9 @@
+@@ -658,8 +660,9 @@ static int dlm_thread(void *data)
* spinlock and do NOT have the dlm lock.
* safe to reserve/queue asts and run the lists. */
Modified: branches/ocfs2-1.2-cert/patches/dlm-allow-for-assert-while-in-progress.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-allow-for-assert-while-in-progress.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-allow-for-assert-while-in-progress.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmmaster.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-18 13:49:09.000000000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-18 13:51:10.861562000 -0700
-@@ -1737,7 +1737,8 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:48:35.031807000 -0700
++++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:48:37.103163000 -0700
+@@ -1737,7 +1737,8 @@ int dlm_assert_master_handler(struct o2n
goto kill;
}
if (!mle) {
Modified: branches/ocfs2-1.2-cert/patches/dlm-allow-recovery-to-start.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-allow-recovery-to-start.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-allow-recovery-to-start.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmmaster.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-18 13:41:49.000000000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-18 13:44:57.764922000 -0700
-@@ -856,6 +856,7 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:48:28.865739000 -0700
++++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:48:31.000095000 -0700
+@@ -856,6 +856,7 @@ lookup:
}
dlm_kick_recovery_thread(dlm);
Added: branches/ocfs2-1.2-cert/patches/dlm-calc-usage-warning.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-calc-usage-warning.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-calc-usage-warning.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -0,0 +1,14 @@
+Index: cert3/fs/ocfs2/dlm/dlmthread.c
+===================================================================
+--- cert3.orig/fs/ocfs2/dlm/dlmthread.c 2006-04-19 14:49:33.180131000 -0700
++++ cert3/fs/ocfs2/dlm/dlmthread.c 2006-04-19 14:49:35.477211000 -0700
+@@ -111,7 +111,8 @@ void __dlm_lockres_calc_usage(struct dlm
+ if (res->owner == dlm->node_num)
+ {
+ if (!list_empty(&res->purge)) {
+- mlog(0, "we master %s:%.*s, but it is on "
++ mlog(ML_NOTICE,
++ "we master %s:%.*s, but it is on "
+ "the purge list. Removing\n",
+ dlm->name, res->lockname.len,
+ res->lockname.name);
Added: branches/ocfs2-1.2-cert/patches/dlm-cancel-assert-during-migrate-notice.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-cancel-assert-during-migrate-notice.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-cancel-assert-during-migrate-notice.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -0,0 +1,14 @@
+Index: cert3/fs/ocfs2/dlm/dlmmaster.c
+===================================================================
+--- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:49:37.599521000 -0700
++++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:49:39.793877000 -0700
+@@ -2003,7 +2003,8 @@ void dlm_assert_master_worker(struct dlm
+ */
+ spin_lock(&res->spinlock);
+ if (res->state & DLM_LOCK_RES_MIGRATING) {
+- mlog(0, "Someone asked us to assert mastery, but we're "
++ mlog(ML_NOTICE,
++ "Someone asked us to assert mastery, but we're "
+ "in the middle of migration. Skipping assert, "
+ "the new master will handle that.\n");
+ spin_unlock(&res->spinlock);
Modified: branches/ocfs2-1.2-cert/patches/dlm-catch-bad-reco-state.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-catch-bad-reco-state.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-catch-bad-reco-state.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmrecovery.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-17 17:36:08.000000000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-17 17:45:42.476500000 -0700
-@@ -709,6 +709,14 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:48:13.531570000 -0700
++++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:48:17.310441000 -0700
+@@ -709,6 +709,14 @@ int dlm_request_all_locks_handler(struct
if (!dlm_grab(dlm))
return -EINVAL;
@@ -17,7 +17,7 @@
BUG_ON(lr->dead_node != dlm->reco.dead_node);
item = kcalloc(1, sizeof(*item), GFP_KERNEL);
-@@ -1502,7 +1510,7 @@
+@@ -1502,7 +1510,7 @@ static int dlm_process_recovery_data(str
struct dlm_lock *newlock = NULL;
struct dlm_lockstatus *lksb = NULL;
int ret = 0;
@@ -26,7 +26,7 @@
struct list_head *iter;
struct dlm_lock *lock = NULL;
-@@ -1612,9 +1620,33 @@
+@@ -1612,9 +1620,33 @@ static int dlm_process_recovery_data(str
* relative to each other, but clearly *not*
* preserved relative to locks from other nodes.
*/
Added: branches/ocfs2-1.2-cert/patches/dlm-check-recovery-flag-before-convert.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-check-recovery-flag-before-convert.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-check-recovery-flag-before-convert.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -0,0 +1,38 @@
+Index: cert3/fs/ocfs2/dlm/dlmconvert.c
+===================================================================
+--- cert3.orig/fs/ocfs2/dlm/dlmconvert.c 2006-04-19 14:48:49.395074000 -0700
++++ cert3/fs/ocfs2/dlm/dlmconvert.c 2006-04-19 14:50:16.975963000 -0700
+@@ -467,6 +467,12 @@ int dlm_convert_lock_handler(struct o2ne
+ }
+
+ spin_lock(&res->spinlock);
++ status = __dlm_lockres_state_to_status(res);
++ if (status != DLM_NORMAL) {
++ spin_unlock(&res->spinlock);
++ dlm_error(status);
++ goto leave;
++ }
+ list_for_each(iter, &res->granted) {
+ lock = list_entry(iter, struct dlm_lock, list);
+ if (lock->ml.cookie == cnv->cookie &&
+@@ -476,6 +482,20 @@ int dlm_convert_lock_handler(struct o2ne
+ }
+ lock = NULL;
+ }
++ if (!lock) {
++ __dlm_print_one_lock_resource(res);
++ list_for_each(iter, &res->granted) {
++ lock = list_entry(iter, struct dlm_lock, list);
++ if (lock->ml.node == cnv->node_idx) {
++ mlog(0, "ahaha there is something here "
++ "for node %u, lock->ml.cookie=%llu, "
++ "cnv->cookie=%llu\n", cnv->node_idx,
++ lock->ml.cookie, cnv->cookie);
++ break;
++ }
++ }
++ lock = NULL;
++ }
+ spin_unlock(&res->spinlock);
+ if (!lock) {
+ status = DLM_IVLOCKID;
Modified: branches/ocfs2-1.2-cert/patches/dlm-cleanup-reco-state-changes.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-cleanup-reco-state-changes.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-cleanup-reco-state-changes.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmrecovery.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-17 17:06:50.000000000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-17 17:36:08.436692000 -0700
-@@ -115,12 +115,31 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:48:11.497214000 -0700
++++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:48:13.531570000 -0700
+@@ -115,12 +115,31 @@ static u64 dlm_get_next_mig_cookie(void)
return c;
}
@@ -36,7 +36,7 @@
spin_unlock(&dlm->spinlock);
}
-@@ -341,7 +360,7 @@
+@@ -341,7 +360,7 @@ int dlm_do_recovery(struct dlm_ctxt *dlm
mlog(0, "new master %u died while recovering %u!\n",
dlm->reco.new_master, dlm->reco.dead_node);
/* unset the new_master, leave dead_node */
@@ -45,7 +45,7 @@
}
/* select a target to recover */
-@@ -350,14 +369,14 @@
+@@ -350,14 +369,14 @@ int dlm_do_recovery(struct dlm_ctxt *dlm
bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES+1, 0);
if (bit >= O2NM_MAX_NODES || bit < 0)
@@ -63,7 +63,7 @@
}
if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
-@@ -2104,7 +2123,7 @@
+@@ -2104,7 +2123,7 @@ again:
/* set the new_master to this node */
spin_lock(&dlm->spinlock);
@@ -72,7 +72,7 @@
spin_unlock(&dlm->spinlock);
}
-@@ -2269,8 +2288,8 @@
+@@ -2269,8 +2288,8 @@ int dlm_begin_reco_handler(struct o2net_
"node %u changing it to %u\n", dlm->name,
dlm->reco.dead_node, br->node_idx, br->dead_node);
}
Modified: branches/ocfs2-1.2-cert/patches/dlm-debug-bad-migrated-lvb.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-debug-bad-migrated-lvb.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-debug-bad-migrated-lvb.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmrecovery.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 12:10:44.996421000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 12:10:53.135773000 -0700
-@@ -1713,8 +1713,19 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:49:00.203731000 -0700
++++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:49:02.398893000 -0700
+@@ -1713,8 +1713,19 @@ static int dlm_process_recovery_data(str
if (!dlm_lvb_is_empty(res->lvb) &&
(ml->type == LKM_EXMODE ||
memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
Modified: branches/ocfs2-1.2-cert/patches/dlm-detach-from-hb-events.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-detach-from-hb-events.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-detach-from-hb-events.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmmaster.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-18 13:28:33.000000000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-18 13:41:49.174123000 -0700
-@@ -2665,6 +2665,7 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:48:26.547383000 -0700
++++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:48:28.865739000 -0700
+@@ -2665,6 +2665,7 @@ static int dlm_add_migration_mle(struct
/* remove it from the list so that only one
* mle will be found */
list_del_init(&tmp->list);
@@ -10,7 +10,7 @@
}
spin_unlock(&tmp->spinlock);
}
-@@ -2758,6 +2759,7 @@
+@@ -2758,6 +2759,7 @@ top:
/* remove from the list early. NOTE: unlinking
* list_head while in list_for_each_safe */
Modified: branches/ocfs2-1.2-cert/patches/dlm-eloop.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-eloop.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-eloop.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: fs/ocfs2/dlm/dlmmaster.c
===================================================================
---- fs/ocfs2/dlm/dlmmaster.c.orig 2006-04-19 11:13:11.005353000 -0700
-+++ fs/ocfs2/dlm/dlmmaster.c 2006-04-19 12:15:01.917663000 -0700
-@@ -991,12 +991,14 @@
+--- fs/ocfs2/dlm/dlmmaster.c.orig 2006-04-19 14:48:53.584689000 -0700
++++ fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:49:08.735859000 -0700
+@@ -991,12 +991,14 @@ recheck:
spin_unlock(&res->spinlock);
/* this will cause the master to re-assert across
* the whole cluster, freeing up mles */
Modified: branches/ocfs2-1.2-cert/patches/dlm-fix-dlm_is_node_dead.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-fix-dlm_is_node_dead.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-fix-dlm_is_node_dead.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmrecovery.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-17 16:06:38.000000000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-17 16:46:33.052103000 -0700
-@@ -267,7 +267,7 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:46:50.413297000 -0700
++++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:48:10.105859000 -0700
+@@ -267,7 +267,7 @@ int dlm_is_node_dead(struct dlm_ctxt *dl
{
int dead;
spin_lock(&dlm->spinlock);
Modified: branches/ocfs2-1.2-cert/patches/dlm-fix-lvb-empty-check.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-fix-lvb-empty-check.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-fix-lvb-empty-check.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmcommon.h
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmcommon.h 2006-04-17 16:06:38.000000000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmcommon.h 2006-04-17 17:06:50.062688000 -0700
-@@ -300,6 +300,15 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmcommon.h 2006-04-19 14:46:49.510619000 -0700
++++ cert3/fs/ocfs2/dlm/dlmcommon.h 2006-04-19 14:48:11.476213000 -0700
+@@ -300,6 +300,15 @@ enum dlm_lockres_list {
DLM_BLOCKED_LIST
};
@@ -20,9 +20,9 @@
{
Index: cert3/fs/ocfs2/dlm/dlmrecovery.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-17 16:46:33.000000000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-17 17:06:50.380689000 -0700
-@@ -1022,8 +1022,9 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:48:10.105859000 -0700
++++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:48:11.497214000 -0700
+@@ -1022,8 +1022,9 @@ static int dlm_add_lock_to_array(struct
ml->type == LKM_PRMODE) {
/* if it is already set, this had better be a PR
* and it has to match */
@@ -34,7 +34,7 @@
mlog(ML_ERROR, "mismatched lvbs!\n");
__dlm_print_one_lock_resource(lock->lockres);
BUG();
-@@ -1553,7 +1554,7 @@
+@@ -1553,7 +1554,7 @@ static int dlm_process_recovery_data(str
lksb->flags |= (ml->flags &
(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
@@ -43,7 +43,7 @@
if (lksb->flags & DLM_LKSB_PUT_LVB) {
/* other node was trying to update
* lvb when node died. recreate the
-@@ -1564,8 +1565,9 @@
+@@ -1564,8 +1565,9 @@ static int dlm_process_recovery_data(str
* most recent valid lvb info */
BUG_ON(ml->type != LKM_EXMODE &&
ml->type != LKM_PRMODE);
Modified: branches/ocfs2-1.2-cert/patches/dlm-fix-migration-asserts.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-fix-migration-asserts.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-fix-migration-asserts.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmmaster.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-18 13:47:15.000000000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-18 13:49:09.809282000 -0700
-@@ -1706,6 +1706,23 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:48:33.046451000 -0700
++++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:48:35.031807000 -0700
+@@ -1706,6 +1706,23 @@ int dlm_assert_master_handler(struct o2n
assert->node_idx);
}
}
Modified: branches/ocfs2-1.2-cert/patches/dlm-fix-mle-refcount-while-inuse.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-fix-mle-refcount-while-inuse.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-fix-mle-refcount-while-inuse.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmmaster.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-17 18:04:34.000000000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-18 13:28:33.258802000 -0700
-@@ -73,6 +73,7 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:48:24.176326000 -0700
++++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:48:26.547383000 -0700
+@@ -73,6 +73,7 @@ struct dlm_master_list_entry
wait_queue_head_t wq;
atomic_t woken;
struct kref mle_refs;
@@ -10,7 +10,7 @@
unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
-@@ -334,6 +335,31 @@
+@@ -334,6 +335,31 @@ static inline void dlm_mle_detach_hb_eve
spin_unlock(&dlm->spinlock);
}
@@ -42,7 +42,7 @@
/* remove from list and free */
static void __dlm_put_mle(struct dlm_master_list_entry *mle)
{
-@@ -387,6 +413,7 @@
+@@ -387,6 +413,7 @@ static void dlm_init_mle(struct dlm_mast
memset(mle->response_map, 0, sizeof(mle->response_map));
mle->master = O2NM_MAX_NODES;
mle->new_master = O2NM_MAX_NODES;
@@ -50,7 +50,7 @@
if (mle->type == DLM_MLE_MASTER) {
BUG_ON(!res);
-@@ -807,7 +834,7 @@
+@@ -807,7 +834,7 @@ lookup:
* if so, the creator of the BLOCK may try to put the last
* ref at this time in the assert master handler, so we
* need an extra one to keep from a bad ptr deref. */
@@ -59,7 +59,7 @@
spin_unlock(&dlm->master_lock);
spin_unlock(&dlm->spinlock);
-@@ -896,7 +923,7 @@
+@@ -896,7 +923,7 @@ wait:
dlm_mle_detach_hb_events(dlm, mle);
dlm_put_mle(mle);
/* put the extra ref */
@@ -68,7 +68,7 @@
wake_waiters:
spin_lock(&res->spinlock);
-@@ -1748,6 +1775,7 @@
+@@ -1748,6 +1775,7 @@ ok:
if (mle) {
int extra_ref = 0;
int nn = -1;
@@ -76,7 +76,7 @@
spin_lock(&mle->spinlock);
if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
-@@ -1767,27 +1795,64 @@
+@@ -1767,27 +1795,64 @@ ok:
wake_up(&mle->wq);
spin_unlock(&mle->spinlock);
@@ -154,7 +154,7 @@
}
}
-@@ -2133,7 +2198,7 @@
+@@ -2133,7 +2198,7 @@ fail:
* take both dlm->spinlock and dlm->master_lock */
spin_lock(&dlm->spinlock);
spin_lock(&dlm->master_lock);
@@ -163,7 +163,7 @@
spin_unlock(&dlm->master_lock);
spin_unlock(&dlm->spinlock);
-@@ -2150,7 +2215,10 @@
+@@ -2150,7 +2215,10 @@ fail:
/* migration failed, detach and clean up mle */
dlm_mle_detach_hb_events(dlm, mle);
dlm_put_mle(mle);
@@ -175,7 +175,7 @@
goto leave;
}
-@@ -2191,7 +2259,10 @@
+@@ -2191,7 +2259,10 @@ fail:
/* migration failed, detach and clean up mle */
dlm_mle_detach_hb_events(dlm, mle);
dlm_put_mle(mle);
@@ -187,7 +187,7 @@
goto leave;
}
/* TODO: if node died: stop, clean up, return error */
-@@ -2207,7 +2278,7 @@
+@@ -2207,7 +2278,7 @@ fail:
/* master is known, detach if not already detached */
dlm_mle_detach_hb_events(dlm, mle);
Modified: branches/ocfs2-1.2-cert/patches/dlm-fix_dlm_lock_reco_handling.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-fix_dlm_lock_reco_handling.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-fix_dlm_lock_reco_handling.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmlock.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmlock.c 2006-04-19 12:15:45.634457000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmlock.c 2006-04-19 12:16:04.581320000 -0700
-@@ -299,7 +299,7 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmlock.c 2006-04-19 14:50:04.744827000 -0700
++++ cert3/fs/ocfs2/dlm/dlmlock.c 2006-04-19 14:50:28.078421000 -0700
+@@ -299,7 +299,7 @@ static enum dlm_status dlm_send_remote_l
if (tmpret >= 0) {
// successfully sent and received
ret = status; // this is already a dlm_status
@@ -11,7 +11,7 @@
mlog(ML_ERROR, "%s:%.*s: BUG. this is a stale lockres "
"no longer owned by %u. that node is coming back "
"up currently.\n", dlm->name, create.namelen,
-@@ -457,7 +457,7 @@
+@@ -457,7 +457,7 @@ int dlm_create_lock_handler(struct o2net
name = create->name;
namelen = create->namelen;
Modified: branches/ocfs2-1.2-cert/patches/dlm-handle-reco-network-errors.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-handle-reco-network-errors.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-handle-reco-network-errors.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmrecovery.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-17 17:45:42.000000000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-17 17:54:13.588703000 -0700
-@@ -756,6 +756,7 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:48:17.310441000 -0700
++++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:48:19.765638000 -0700
+@@ -756,6 +756,7 @@ static void dlm_request_all_locks_worker
struct list_head *iter;
int ret;
u8 dead_node, reco_master;
@@ -10,7 +10,7 @@
dlm = item->dlm;
dead_node = item->u.ral.dead_node;
-@@ -792,12 +793,18 @@
+@@ -792,12 +793,18 @@ static void dlm_request_all_locks_worker
dlm_move_reco_locks_to_list(dlm, &resources, dead_node);
/* now we can begin blasting lockreses without the dlm lock */
@@ -30,7 +30,7 @@
}
/* move the resources back to the list */
-@@ -805,9 +812,12 @@
+@@ -805,9 +812,12 @@ static void dlm_request_all_locks_worker
list_splice_init(&resources, &dlm->reco.resources);
spin_unlock(&dlm->spinlock);
@@ -46,7 +46,7 @@
free_page((unsigned long)data);
}
-@@ -827,8 +837,14 @@
+@@ -827,8 +837,14 @@ static int dlm_send_all_done_msg(struct
ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
sizeof(done_msg), send_to, &tmpret);
@@ -63,7 +63,7 @@
ret = tmpret;
return ret;
}
-@@ -1110,22 +1126,25 @@
+@@ -1110,22 +1126,25 @@ int dlm_send_one_lockres(struct dlm_ctxt
* we must send it immediately. */
ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
res, total_locks);
Modified: branches/ocfs2-1.2-cert/patches/dlm-init-mle-when-used.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-init-mle-when-used.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-init-mle-when-used.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmmaster.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-18 13:44:57.000000000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-18 13:47:15.129070000 -0700
-@@ -1509,15 +1509,12 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:48:31.000095000 -0700
++++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:48:33.046451000 -0700
+@@ -1509,15 +1509,12 @@ way_up_top:
mlog_errno(-ENOMEM);
goto send_response;
}
Added: branches/ocfs2-1.2-cert/patches/dlm-lvb-debug-pretty-print.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-lvb-debug-pretty-print.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-lvb-debug-pretty-print.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -0,0 +1,101 @@
+Index: cert3/fs/ocfs2/dlm/dlmrecovery.c
+===================================================================
+--- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:49:10.803215000 -0700
++++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:49:12.946571000 -0700
+@@ -1604,6 +1604,66 @@ dlm_list_num_to_pointer(struct dlm_lock_
+ * TODO: do MIGRATING and RECOVERING spinning
+ */
+
++#define DLM_OCFS2_SEC_SHIFT (64 - 34)
++#define DLM_OCFS2_NSEC_MASK ((1ULL << DLM_OCFS2_SEC_SHIFT) - 1)
++
++struct floo {
++ __be32 lvb_old_seq;
++ __be32 lvb_version;
++ __be32 lvb_iclusters;
++ __be32 lvb_iuid;
++ __be32 lvb_igid;
++ __be16 lvb_imode;
++ __be16 lvb_inlink;
++ __be64 lvb_iatime_packed;
++ __be64 lvb_ictime_packed;
++ __be64 lvb_imtime_packed;
++ __be64 lvb_isize;
++ __be32 lvb_reserved[2];
++};
++
++// OLDSEQ-- VERSION- CLUSTERS UIUD---- IGID---- MODE NLNK ATIMEPACKED----- CTIMEPACKED----- MTIMEPACKED----- ISIZE----------- RESERVED--------
++// 00000000 00000001 00000001 0000c09f 00000262 41ff 0006 10f45a50844bfa5b 110885ed11acf024 110885ed11acf024 0000000000003000 0000000000000000
++static inline void dlm_print_ocfs2_lvb(unsigned char *lvb)
++{
++ struct floo *raw = (struct floo *)lvb;
++ u32 clusters, uid, gid, oldseq, vers;
++ u16 mode, nlink;
++ u64 isize, atime, mtime, ctime;
++ /* just do some lame decoding, doesn't need to be too
++ * accurate, just cut the encoded value into smaller values */
++
++
++ oldseq = be32_to_cpu(raw->lvb_old_seq);
++ vers = be32_to_cpu(raw->lvb_version);
++ clusters= be32_to_cpu(raw->lvb_iclusters);
++ isize = be64_to_cpu(raw->lvb_isize);
++ uid = be32_to_cpu(raw->lvb_iuid);
++ gid = be32_to_cpu(raw->lvb_igid);
++ mode = be16_to_cpu(raw->lvb_imode);
++ nlink = be16_to_cpu(raw->lvb_inlink);
++ /* just print out the tv_sec portion */
++ atime = be64_to_cpu(raw->lvb_iatime_packed) >> DLM_OCFS2_SEC_SHIFT;
++ mtime = be64_to_cpu(raw->lvb_imtime_packed) >> DLM_OCFS2_SEC_SHIFT;
++ ctime = be64_to_cpu(raw->lvb_ictime_packed) >> DLM_OCFS2_SEC_SHIFT;
++ printk("[%u:%u:%u:%llu:%u:%u:%u:%u:%llu:%llu:%llu]", oldseq, vers,
++ clusters, (unsigned long long)isize, uid, gid, mode,
++ nlink, (unsigned long long)atime,
++ (unsigned long long)mtime, (unsigned long long)ctime);
++}
++
++static inline void dlm_print_lvb(unsigned char *lvb)
++{
++#if 0
++ int i;
++ for (i=0; i<DLM_LVB_LEN; i++)
++ printk("%02x", (unsigned char)lvb[i]);
++#endif
++
++ dlm_print_ocfs2_lvb(lvb);
++}
++
++
+ /*
+ * NOTE about in-flight requests during migration:
+ *
+@@ -1729,17 +1789,19 @@ static int dlm_process_recovery_data(str
+ if (!dlm_lvb_is_empty(res->lvb) &&
+ (ml->type == LKM_EXMODE ||
+ memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
+- int i;
+- mlog(ML_ERROR, "%s:%.*s: received bad "
+- "lvb! type=%d\n", dlm->name,
+- res->lockname.len,
+- res->lockname.name, ml->type);
++ u64 c = be64_to_cpu(lock->ml.cookie);
++ mlog(ML_ERROR, "%s:%.*s: received bad "
++ "lvb! type=%d, convtype=%d, "
++ "node=%u, cookie=%u:%llu\n",
++ dlm->name, res->lockname.len,
++ res->lockname.name, ml->type,
++ ml->convert_type, ml->node,
++ dlm_get_lock_cookie_node(c),
++ dlm_get_lock_cookie_seq(c));
+ printk("lockres lvb=[");
+- for (i=0; i<DLM_LVB_LEN; i++)
+- printk("%02x", res->lvb[i]);
+- printk("]\nmigrated lvb=[");
+- for (i=0; i<DLM_LVB_LEN; i++)
+- printk("%02x", mres->lvb[i]);
++ dlm_print_lvb(res->lvb);
++ printk("]\nmigrated lvb=[");
++ dlm_print_lvb(mres->lvb);
+ printk("]\n");
+ dlm_print_one_lock_resource(res);
+ BUG();
Modified: branches/ocfs2-1.2-cert/patches/dlm-mastery-debug-mlogs.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-mastery-debug-mlogs.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-mastery-debug-mlogs.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmmaster.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-18 13:51:10.000000000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-18 13:58:02.717881000 -0700
-@@ -368,9 +368,13 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:48:37.103163000 -0700
++++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:48:39.109519000 -0700
+@@ -368,9 +368,13 @@ static void __dlm_put_mle(struct dlm_mas
assert_spin_locked(&dlm->spinlock);
assert_spin_locked(&dlm->master_lock);
@@ -19,7 +19,7 @@
}
-@@ -1006,6 +1010,12 @@
+@@ -1006,6 +1010,12 @@ recheck:
"rechecking now\n", dlm->name, res->lockname.len,
res->lockname.name);
goto recheck;
@@ -32,7 +32,7 @@
}
if (m != O2NM_MAX_NODES) {
-@@ -1687,7 +1697,7 @@
+@@ -1687,7 +1697,7 @@ int dlm_assert_master_handler(struct o2n
if (bit >= O2NM_MAX_NODES) {
/* not necessarily an error, though less likely.
* could be master just re-asserting. */
@@ -41,7 +41,7 @@
"is asserting! (%.*s)\n", assert->node_idx,
namelen, name);
} else if (bit != assert->node_idx) {
-@@ -1699,7 +1709,7 @@
+@@ -1699,7 +1709,7 @@ int dlm_assert_master_handler(struct o2n
* number winning the mastery will respond
* YES to mastery requests, but this node
* had no way of knowing. let it pass. */
@@ -50,7 +50,7 @@
"%u is asserting. (%.*s) %u must "
"have begun after %u won.\n", bit,
assert->node_idx, namelen, name, bit,
-@@ -1866,7 +1876,7 @@
+@@ -1866,7 +1876,7 @@ ok:
spin_unlock(&dlm->spinlock);
} else if (res) {
if (res->owner != assert->node_idx) {
@@ -59,7 +59,7 @@
"owner is %u (%.*s), no mle\n", assert->node_idx,
res->owner, namelen, name);
}
-@@ -2264,8 +2274,8 @@
+@@ -2264,8 +2274,8 @@ fail:
/* avoid hang during shutdown when migrating lockres
* to a node which also goes down */
if (dlm_is_node_dead(dlm, target)) {
@@ -70,7 +70,7 @@
dlm->name, res->lockname.len,
res->lockname.name, target);
ret = -ERESTARTSYS;
-@@ -2782,8 +2792,8 @@
+@@ -2782,8 +2792,8 @@ top:
spin_unlock(&mle->spinlock);
wake_up(&mle->wq);
Modified: branches/ocfs2-1.2-cert/patches/dlm-mle-debugging.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-mle-debugging.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-mle-debugging.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmmaster.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-17 16:06:38.000000000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-17 18:04:34.193499000 -0700
-@@ -123,15 +123,30 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:46:46.223769000 -0700
++++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:48:24.176326000 -0700
+@@ -123,15 +123,30 @@ static inline int dlm_mle_equal(struct d
return 1;
}
@@ -34,7 +34,7 @@
k = &mle->mle_refs;
if (mle->type == DLM_MLE_BLOCK)
-@@ -152,9 +167,18 @@
+@@ -152,9 +167,18 @@ void dlm_print_one_mle(struct dlm_master
name = mle->u.res->lockname.name;
}
@@ -56,7 +56,7 @@
}
-@@ -166,7 +190,6 @@
+@@ -166,7 +190,6 @@ static void dlm_dump_mles(struct dlm_ctx
struct list_head *iter;
mlog(ML_NOTICE, "dumping all mles for domain %s:\n", dlm->name);
Modified: branches/ocfs2-1.2-cert/patches/dlm-mlog_to_printk
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-mlog_to_printk 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-mlog_to_printk 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmdomain.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmdomain.c 2006-04-18 14:34:28.603458000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmdomain.c 2006-04-19 12:15:34.858011000 -0700
-@@ -381,12 +381,13 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmdomain.c 2006-04-19 14:45:53.062961000 -0700
++++ cert3/fs/ocfs2/dlm/dlmdomain.c 2006-04-19 14:49:53.198691000 -0700
+@@ -381,12 +381,13 @@ static void __dlm_print_nodes(struct dlm
assert_spin_locked(&dlm->spinlock);
@@ -18,7 +18,7 @@
}
static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data)
-@@ -402,7 +403,7 @@
+@@ -402,7 +403,7 @@ static int dlm_exit_domain_handler(struc
node = exit_msg->node_idx;
@@ -27,7 +27,7 @@
spin_lock(&dlm->spinlock);
clear_bit(node, dlm->domain_map);
-@@ -651,6 +652,8 @@
+@@ -651,6 +652,8 @@ static int dlm_assert_joined_handler(str
set_bit(assert->node_idx, dlm->domain_map);
__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
@@ -38,9 +38,9 @@
/* notify anything attached to the heartbeat events */
Index: cert3/fs/ocfs2/super.c
===================================================================
---- cert3.orig/fs/ocfs2/super.c 2006-04-18 14:34:28.904456000 -0700
-+++ cert3/fs/ocfs2/super.c 2006-04-19 12:15:34.872010000 -0700
-@@ -641,9 +641,8 @@
+--- cert3.orig/fs/ocfs2/super.c 2006-04-19 14:45:53.066958000 -0700
++++ cert3/fs/ocfs2/super.c 2006-04-19 14:49:53.214690000 -0700
+@@ -641,9 +641,8 @@ static int ocfs2_fill_super(struct super
ocfs2_complete_mount_recovery(osb);
@@ -52,7 +52,7 @@
atomic_set(&osb->vol_state, VOLUME_MOUNTED);
wake_up(&osb->osb_mount_event);
-@@ -1028,7 +1027,7 @@
+@@ -1028,7 +1027,7 @@ static int ocfs2_fill_local_node_info(st
goto bail;
}
@@ -61,7 +61,7 @@
status = 0;
bail:
-@@ -1208,8 +1207,8 @@
+@@ -1208,8 +1207,8 @@ static void ocfs2_dismount_volume(struct
atomic_set(&osb->vol_state, VOLUME_DISMOUNTED);
@@ -72,7 +72,7 @@
ocfs2_delete_osb(osb);
kfree(osb);
-@@ -1351,7 +1350,7 @@
+@@ -1351,7 +1350,7 @@ static int ocfs2_initialize_super(struct
status = -EINVAL;
goto bail;
}
@@ -83,9 +83,9 @@
osb->osb_orphan_wipes = kcalloc(osb->max_slots,
Index: cert3/fs/ocfs2/slot_map.c
===================================================================
---- cert3.orig/fs/ocfs2/slot_map.c 2006-04-18 14:34:28.908456000 -0700
-+++ cert3/fs/ocfs2/slot_map.c 2006-04-19 12:15:34.881010000 -0700
-@@ -264,7 +264,7 @@
+--- cert3.orig/fs/ocfs2/slot_map.c 2006-04-19 14:45:53.257765000 -0700
++++ cert3/fs/ocfs2/slot_map.c 2006-04-19 14:49:53.227677000 -0700
+@@ -264,7 +264,7 @@ int ocfs2_find_slot(struct ocfs2_super *
osb->slot_num = slot;
spin_unlock(&si->si_lock);
@@ -96,9 +96,9 @@
if (status < 0)
Index: cert3/fs/ocfs2/cluster/tcp.c
===================================================================
---- cert3.orig/fs/ocfs2/cluster/tcp.c 2006-04-19 12:15:32.248662000 -0700
-+++ cert3/fs/ocfs2/cluster/tcp.c 2006-04-19 12:15:34.896010000 -0700
-@@ -397,8 +397,8 @@
+--- cert3.orig/fs/ocfs2/cluster/tcp.c 2006-04-19 14:49:50.946335000 -0700
++++ cert3/fs/ocfs2/cluster/tcp.c 2006-04-19 14:49:53.249655000 -0700
+@@ -397,8 +397,8 @@ static void o2net_set_nn_state(struct o2
}
if (was_valid && !valid) {
@@ -109,7 +109,7 @@
o2net_complete_nodes_nsw(nn);
}
-@@ -410,10 +410,10 @@
+@@ -410,10 +410,10 @@ static void o2net_set_nn_state(struct o2
* the only way to start connecting again is to down
* heartbeat and bring it back up. */
cancel_delayed_work(&nn->nn_connect_expired);
@@ -124,7 +124,7 @@
}
/* trigger the connecting worker func as long as we're not valid,
-@@ -1294,7 +1294,7 @@
+@@ -1294,7 +1294,7 @@ static void o2net_idle_timer(unsigned lo
do_gettimeofday(&now);
Added: branches/ocfs2-1.2-cert/patches/dlm-more-time-needed-for-hb-detection.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-more-time-needed-for-hb-detection.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-more-time-needed-for-hb-detection.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -0,0 +1,13 @@
+Index: cert3/fs/ocfs2/dlm/dlmmaster.c
+===================================================================
+--- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:49:24.001029000 -0700
++++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:49:26.486094000 -0700
+@@ -883,7 +883,7 @@ redo_request:
+ }
+
+ dlm_kick_recovery_thread(dlm);
+- msleep(100);
++ msleep(1000);
+ dlm_wait_for_recovery(dlm);
+
+ spin_lock(&dlm->spinlock);
Modified: branches/ocfs2-1.2-cert/patches/dlm-new_proc_entry
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-new_proc_entry 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-new_proc_entry 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmdebug.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmdebug.c 2006-04-19 12:15:45.604457000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmdebug.c 2006-04-19 12:16:00.178707000 -0700
-@@ -58,6 +58,9 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmdebug.c 2006-04-19 14:50:04.705827000 -0700
++++ cert3/fs/ocfs2/dlm/dlmdebug.c 2006-04-19 14:50:25.546076000 -0700
+@@ -58,6 +58,9 @@ static int dlm_parse_domain_and_lockres(
struct dlm_ctxt **dlm,
struct dlm_lock_resource **res);
@@ -12,7 +12,7 @@
typedef int (dlm_debug_func_t)(const char __user *data, unsigned int len);
struct dlm_debug_funcs
-@@ -114,6 +117,8 @@
+@@ -114,6 +117,8 @@ static struct file_operations dlm_debug_
#define OCFS2_DLM_PROC_PATH "fs/ocfs2_dlm"
#define DLM_DEBUG_PROC_NAME "debug"
@@ -21,7 +21,7 @@
static struct proc_dir_entry *ocfs2_dlm_proc;
void dlm_remove_proc(void)
-@@ -140,6 +145,52 @@
+@@ -140,6 +145,52 @@ void dlm_init_proc(void)
entry->proc_fops = &dlm_debug_operations;
}
@@ -76,8 +76,8 @@
* able to shut it off if needed, hence the KERN_NOTICE level */
Index: cert3/fs/ocfs2/dlm/dlmdebug.h
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmdebug.h 2006-04-18 14:34:21.143910000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmdebug.h 2006-04-19 12:16:00.186699000 -0700
+--- cert3.orig/fs/ocfs2/dlm/dlmdebug.h 2006-04-19 14:45:33.328122000 -0700
++++ cert3/fs/ocfs2/dlm/dlmdebug.h 2006-04-19 14:50:25.556066000 -0700
@@ -28,5 +28,7 @@
void dlm_remove_proc(void);
void dlm_init_proc(void);
@@ -88,9 +88,9 @@
#endif
Index: cert3/fs/ocfs2/dlm/dlmdomain.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmdomain.c 2006-04-19 12:15:48.635488000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmdomain.c 2006-04-19 12:16:00.198688000 -0700
-@@ -193,6 +193,8 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmdomain.c 2006-04-19 14:50:07.546861000 -0700
++++ cert3/fs/ocfs2/dlm/dlmdomain.c 2006-04-19 14:50:25.568066000 -0700
+@@ -193,6 +193,8 @@ static int dlm_wait_on_domain_helper(con
static void dlm_free_ctxt_mem(struct dlm_ctxt *dlm)
{
@@ -99,7 +99,7 @@
if (dlm->lockres_hash)
free_page((unsigned long) dlm->lockres_hash);
-@@ -1278,6 +1280,8 @@
+@@ -1278,6 +1280,8 @@ static struct dlm_ctxt *dlm_alloc_ctxt(c
INIT_LIST_HEAD(&dlm->dlm_eviction_callbacks);
@@ -110,9 +110,9 @@
Index: cert3/fs/ocfs2/dlm/dlmcommon.h
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmcommon.h 2006-04-19 12:15:48.622488000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmcommon.h 2006-04-19 12:16:00.210675000 -0700
-@@ -107,6 +107,8 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmcommon.h 2006-04-19 14:50:07.528877000 -0700
++++ cert3/fs/ocfs2/dlm/dlmcommon.h 2006-04-19 14:50:25.591065000 -0700
+@@ -107,6 +107,8 @@ struct dlm_ctxt
struct list_head master_list;
struct list_head mle_hb_events;
Added: branches/ocfs2-1.2-cert/patches/dlm-quiet-noisy-mastery-reco-msgs.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-quiet-noisy-mastery-reco-msgs.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-quiet-noisy-mastery-reco-msgs.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -0,0 +1,70 @@
+Index: cert3/fs/ocfs2/dlm/dlmmaster.c
+===================================================================
+--- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:50:07.569861000 -0700
++++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:50:23.349915000 -0700
+@@ -1635,13 +1635,13 @@ again:
+ tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
+ &assert, sizeof(assert), to, &r);
+ if (tmpret < 0) {
+- mlog(ML_ERROR, "assert_master returned %d!\n", tmpret);
++ mlog(0, "assert_master returned %d!\n", tmpret);
+ if (!dlm_is_host_down(tmpret)) {
+- mlog(ML_ERROR, "unhandled error!\n");
++ mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
+ BUG();
+ }
+ /* a node died. finish out the rest of the nodes. */
+- mlog(ML_ERROR, "link to %d went down!\n", to);
++ mlog(0, "link to %d went down!\n", to);
+ /* any nonzero status return will do */
+ ret = tmpret;
+ } else if (r < 0) {
+@@ -2025,7 +2025,8 @@ void dlm_assert_master_worker(struct dlm
+ nodemap, flags);
+ if (ret < 0) {
+ /* no need to restart, we are done */
+- mlog_errno(ret);
++ if (!dlm_is_host_down(ret))
++ mlog_errno(ret);
+ }
+
+ /* Ok, we've asserted ourselves. Let's let migration start. */
+@@ -2800,7 +2801,7 @@ top:
+ * may result in the mle being unlinked and
+ * freed, but there may still be a process
+ * waiting in the dlmlock path which is fine. */
+- mlog(ML_ERROR, "node %u was expected master\n",
++ mlog(0, "node %u was expected master\n",
+ dead_node);
+ atomic_set(&mle->woken, 1);
+ spin_unlock(&mle->spinlock);
+Index: cert3/fs/ocfs2/dlm/dlmrecovery.c
+===================================================================
+--- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:50:21.250657000 -0700
++++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:50:23.370895000 -0700
+@@ -487,7 +487,7 @@ static int dlm_in_recovery(struct dlm_ct
+ void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
+ {
+ if (dlm_in_recovery(dlm)) {
+- mlog(ML_NOTICE, "%s: reco thread %d in recovery: "
++ mlog(0, "%s: reco thread %d in recovery: "
+ "state=%d, master=%u, dead=%u\n",
+ dlm->name, dlm->dlm_reco_thread_task->pid,
+ dlm->reco.state, dlm->reco.new_master,
+@@ -743,12 +743,11 @@ static int dlm_remaster_locks(struct dlm
+ break;
+ case DLM_RECO_NODE_DATA_RECEIVING:
+ case DLM_RECO_NODE_DATA_REQUESTED:
+- if (pass % 1000)
+- mlg = ML_ERROR;
+- else if (pass % 100 == 0)
++ mlg = 0;
++ if (pass % 100 == 0)
+ mlg = ML_NOTICE;
+- else
+- mlg = 0;
++ if (pass % 1000 == 0)
++ mlg = ML_ERROR;
+ mlog(mlg, "%s: node %u still in state %s\n",
+ dlm->name, ndata->node_num,
+ ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
Modified: branches/ocfs2-1.2-cert/patches/dlm-recheck-master-on-unlock.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-recheck-master-on-unlock.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-recheck-master-on-unlock.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmunlock.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmunlock.c 2006-04-17 16:06:38.000000000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmunlock.c 2006-04-17 16:45:10.836584000 -0700
-@@ -319,6 +319,16 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmunlock.c 2006-04-19 14:46:50.779297000 -0700
++++ cert3/fs/ocfs2/dlm/dlmunlock.c 2006-04-19 14:48:08.713179000 -0700
+@@ -319,6 +319,16 @@ static enum dlm_status dlm_send_remote_u
mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
Modified: branches/ocfs2-1.2-cert/patches/dlm-reco-debug-mlogs.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-reco-debug-mlogs.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-reco-debug-mlogs.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmrecovery.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-17 17:54:13.000000000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-17 17:58:21.096120000 -0700
-@@ -239,6 +239,53 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:48:19.765638000 -0700
++++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:48:21.966993000 -0700
+@@ -239,6 +239,53 @@ void dlm_complete_recovery_thread(struct
*
*/
@@ -56,7 +56,7 @@
#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)
-@@ -327,7 +374,28 @@
+@@ -327,7 +374,28 @@ static int dlm_in_recovery(struct dlm_ct
void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
{
@@ -86,7 +86,7 @@
}
static void dlm_begin_recovery(struct dlm_ctxt *dlm)
-@@ -385,7 +453,8 @@
+@@ -385,7 +453,8 @@ int dlm_do_recovery(struct dlm_ctxt *dlm
/* return to main thread loop and sleep. */
return 0;
}
@@ -96,7 +96,7 @@
dlm->reco.dead_node);
spin_unlock(&dlm->spinlock);
-@@ -408,8 +477,8 @@
+@@ -408,8 +477,8 @@ int dlm_do_recovery(struct dlm_ctxt *dlm
}
mlog(0, "another node will master this recovery session.\n");
}
@@ -107,7 +107,7 @@
dlm->node_num, dlm->reco.dead_node);
/* it is safe to start everything back up here
-@@ -421,7 +490,8 @@
+@@ -421,7 +490,8 @@ int dlm_do_recovery(struct dlm_ctxt *dlm
return 0;
master_here:
@@ -117,7 +117,7 @@
dlm->name, dlm->reco.dead_node, dlm->node_num);
status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
-@@ -433,7 +503,7 @@
+@@ -433,7 +503,7 @@ master_here:
msleep(100);
} else {
/* success! see if any other nodes need recovery */
@@ -126,7 +126,7 @@
dlm->name, dlm->reco.dead_node, dlm->node_num);
dlm_reset_recovery(dlm);
}
-@@ -563,11 +633,19 @@
+@@ -563,11 +633,19 @@ static int dlm_remaster_locks(struct dlm
goto leave;
case DLM_RECO_NODE_DATA_RECEIVING:
case DLM_RECO_NODE_DATA_REQUESTED:
@@ -146,7 +146,7 @@
break;
}
}
-@@ -592,7 +670,7 @@
+@@ -592,7 +670,7 @@ static int dlm_remaster_locks(struct dlm
spin_unlock(&dlm->spinlock);
mlog(0, "should be done with recovery!\n");
@@ -155,7 +155,7 @@
"dead=%u, this=%u, new=%u\n", dlm->name,
jiffies, dlm->reco.dead_node,
dlm->node_num, dlm->reco.new_master);
-@@ -713,6 +791,7 @@
+@@ -713,6 +791,7 @@ int dlm_request_all_locks_handler(struct
mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
"dead_node is %u\n", dlm->name, lr->node_idx,
lr->dead_node, dlm->reco.dead_node);
@@ -163,7 +163,7 @@
/* this is a hack */
dlm_put(dlm);
return -ENOMEM;
-@@ -763,6 +842,9 @@
+@@ -763,6 +842,9 @@ static void dlm_request_all_locks_worker
reco_master = item->u.ral.reco_master;
mres = (struct dlm_migratable_lockres *)data;
@@ -173,7 +173,7 @@
if (dead_node != dlm->reco.dead_node ||
reco_master != dlm->reco.new_master) {
/* show extra debug info if the recovery state is messed */
-@@ -802,6 +884,9 @@
+@@ -802,6 +884,9 @@ static void dlm_request_all_locks_worker
DLM_MRES_RECOVERY);
if (ret < 0) {
mlog_errno(ret);
@@ -183,7 +183,7 @@
skip_all_done = 1;
break;
}
-@@ -816,6 +901,9 @@
+@@ -816,6 +901,9 @@ static void dlm_request_all_locks_worker
ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
if (ret < 0) {
mlog_errno(ret);
@@ -193,7 +193,7 @@
}
}
-@@ -831,7 +919,7 @@
+@@ -831,7 +919,7 @@ static int dlm_send_all_done_msg(struct
memset(&done_msg, 0, sizeof(done_msg));
done_msg.node_idx = dlm->node_num;
done_msg.dead_node = dead_node;
@@ -202,7 +202,7 @@
"my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
done_msg.dead_node);
-@@ -864,6 +952,11 @@
+@@ -864,6 +952,11 @@ int dlm_reco_data_done_handler(struct o2
mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
"node_idx=%u, this node=%u\n", done->dead_node,
dlm->reco.dead_node, done->node_idx, dlm->node_num);
@@ -214,7 +214,7 @@
BUG_ON(done->dead_node != dlm->reco.dead_node);
spin_lock(&dlm_reco_state_lock);
-@@ -2120,7 +2213,7 @@
+@@ -2120,7 +2213,7 @@ int dlm_pick_recovery_master(struct dlm_
struct dlm_lockstatus lksb;
int status = -EINVAL;
@@ -223,7 +223,7 @@
dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
again:
memset(&lksb, 0, sizeof(lksb));
-@@ -2128,17 +2221,17 @@
+@@ -2128,17 +2221,17 @@ again:
ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
DLM_RECOVERY_LOCK_NAME, dlm_reco_ast, dlm, dlm_reco_bast);
@@ -244,7 +244,7 @@
"do the recovery\n", dlm->name,
dlm->reco.new_master);
status = -EEXIST;
-@@ -2149,7 +2242,7 @@
+@@ -2149,7 +2242,7 @@ again:
spin_lock(&dlm->spinlock);
if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
status = -EINVAL;
@@ -253,7 +253,7 @@
"node got recovered already\n", dlm->name);
if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
mlog(ML_ERROR, "%s: new master is %u "
-@@ -2164,7 +2257,7 @@
+@@ -2164,7 +2257,7 @@ again:
/* if this node has actually become the recovery master,
* set the master and send the messages to begin recovery */
if (!status) {
@@ -262,7 +262,7 @@
"begin_reco now\n", dlm->name,
dlm->reco.dead_node, dlm->node_num);
status = dlm_send_begin_reco_message(dlm,
-@@ -2195,7 +2288,7 @@
+@@ -2195,7 +2288,7 @@ again:
mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
}
} else if (ret == DLM_NOTQUEUED) {
@@ -271,7 +271,7 @@
dlm->name, dlm->node_num);
/* another node is master. wait on
* reco.new_master != O2NM_INVALID_NODE_NUM
-@@ -2204,12 +2297,12 @@
+@@ -2204,12 +2297,12 @@ again:
dlm_reco_master_ready(dlm),
msecs_to_jiffies(1000));
if (!dlm_reco_master_ready(dlm)) {
@@ -286,7 +286,7 @@
dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
status = -EEXIST;
} else {
-@@ -2243,7 +2336,7 @@
+@@ -2243,7 +2336,7 @@ static int dlm_send_begin_reco_message(s
mlog_entry("%u\n", dead_node);
@@ -295,7 +295,7 @@
spin_lock(&dlm->spinlock);
dlm_node_iter_init(dlm->domain_map, &iter);
-@@ -2316,8 +2409,9 @@
+@@ -2316,8 +2409,9 @@ int dlm_begin_reco_handler(struct o2net_
if (!dlm_grab(dlm))
return 0;
@@ -307,7 +307,7 @@
dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);
-@@ -2359,6 +2453,11 @@
+@@ -2359,6 +2453,11 @@ int dlm_begin_reco_handler(struct o2net_
spin_unlock(&dlm->spinlock);
dlm_kick_recovery_thread(dlm);
@@ -319,7 +319,7 @@
dlm_put(dlm);
return 0;
}
-@@ -2371,7 +2470,7 @@
+@@ -2371,7 +2470,7 @@ static int dlm_send_finalize_reco_messag
int nodenum;
int status;
@@ -328,7 +328,7 @@
dlm->name, dlm->reco.dead_node);
spin_lock(&dlm->spinlock);
-@@ -2416,8 +2515,9 @@
+@@ -2416,8 +2515,9 @@ int dlm_finalize_reco_handler(struct o2n
if (!dlm_grab(dlm))
return 0;
@@ -340,7 +340,7 @@
spin_lock(&dlm->spinlock);
-@@ -2441,6 +2541,9 @@
+@@ -2441,6 +2541,9 @@ int dlm_finalize_reco_handler(struct o2n
dlm_reset_recovery(dlm);
dlm_kick_recovery_thread(dlm);
Modified: branches/ocfs2-1.2-cert/patches/dlm-recovery-stage-mlogs.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-recovery-stage-mlogs.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-recovery-stage-mlogs.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmrecovery.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 12:10:53.000000000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 12:14:04.901052000 -0700
-@@ -527,6 +527,7 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:49:02.398893000 -0700
++++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:49:04.481168000 -0700
+@@ -527,6 +527,7 @@ static int dlm_remaster_locks(struct dlm
int all_nodes_done;
int destroy = 0;
int pass = 0;
@@ -10,7 +10,7 @@
status = dlm_init_recovery_area(dlm, dead_node);
if (status < 0)
-@@ -565,9 +566,9 @@
+@@ -565,9 +566,9 @@ static int dlm_remaster_locks(struct dlm
BUG();
break;
case DLM_RECO_NODE_DATA_DEAD:
@@ -23,7 +23,7 @@
// start all over
destroy = 1;
status = -EAGAIN;
-@@ -599,6 +600,7 @@
+@@ -599,6 +600,7 @@ static int dlm_remaster_locks(struct dlm
while (1) {
/* check all the nodes now to see if we are
* done, or if anyone died */
@@ -31,7 +31,7 @@
all_nodes_done = 1;
spin_lock(&dlm_reco_state_lock);
list_for_each(iter, &dlm->reco.node_data) {
-@@ -639,7 +641,13 @@
+@@ -639,7 +641,13 @@ static int dlm_remaster_locks(struct dlm
goto leave;
case DLM_RECO_NODE_DATA_RECEIVING:
case DLM_RECO_NODE_DATA_REQUESTED:
@@ -46,7 +46,7 @@
dlm->name, ndata->node_num,
ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
"receiving" : "requested");
-@@ -657,7 +665,7 @@
+@@ -657,7 +665,7 @@ static int dlm_remaster_locks(struct dlm
}
spin_unlock(&dlm_reco_state_lock);
@@ -55,7 +55,7 @@
all_nodes_done?"yes":"no");
if (all_nodes_done) {
int ret;
-@@ -2116,13 +2124,13 @@
+@@ -2116,13 +2124,13 @@ void __dlm_hb_node_down(struct dlm_ctxt
assert_spin_locked(&dlm->spinlock);
if (dlm->reco.new_master == idx) {
@@ -71,7 +71,7 @@
"finalize1 state, clearing\n", dlm->name, idx);
dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
__dlm_reset_recovery(dlm);
-@@ -2436,7 +2444,7 @@
+@@ -2436,7 +2444,7 @@ retry:
msleep(100);
goto retry;
} else if (ret == EAGAIN) {
@@ -80,7 +80,7 @@
"%u, but node %u is waiting for last recovery "
"to complete, backoff for a bit\n", dlm->name,
dead_node, nodenum);
-@@ -2459,7 +2467,7 @@
+@@ -2459,7 +2467,7 @@ int dlm_begin_reco_handler(struct o2net_
spin_lock(&dlm->spinlock);
if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
@@ -89,7 +89,7 @@
"but this node is in finalize state, waiting on finalize2\n",
dlm->name, br->node_idx, br->dead_node,
dlm->reco.dead_node, dlm->reco.new_master);
-@@ -2531,7 +2539,7 @@
+@@ -2531,7 +2539,7 @@ static int dlm_send_finalize_reco_messag
int status;
int stage = 1;
@@ -98,7 +98,7 @@
"stage %d\n", dlm->name, dlm->reco.dead_node, stage);
spin_lock(&dlm->spinlock);
-@@ -2588,7 +2596,7 @@
+@@ -2588,7 +2596,7 @@ int dlm_finalize_reco_handler(struct o2n
if (fr->flags & DLM_FINALIZE_STAGE2)
stage = 2;
Modified: branches/ocfs2-1.2-cert/patches/dlm-replace_gfp_kernel_with_nofs
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-replace_gfp_kernel_with_nofs 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-replace_gfp_kernel_with_nofs 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmrecovery.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 12:15:42.773426000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 12:15:45.594457000 -0700
-@@ -767,7 +767,7 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:50:02.499513000 -0700
++++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:50:04.678827000 -0700
+@@ -767,7 +767,7 @@ int dlm_init_recovery_area(struct dlm_ct
}
BUG_ON(num == dead_node);
@@ -11,7 +11,7 @@
if (!ndata) {
dlm_destroy_recovery_area(dlm, dead_node);
return -ENOMEM;
-@@ -851,14 +851,14 @@
+@@ -851,14 +851,14 @@ int dlm_request_all_locks_handler(struct
}
BUG_ON(lr->dead_node != dlm->reco.dead_node);
@@ -28,7 +28,7 @@
if (!buf) {
kfree(item);
dlm_put(dlm);
-@@ -1336,8 +1336,8 @@
+@@ -1336,8 +1336,8 @@ int dlm_mig_lockres_handler(struct o2net
mlog(0, "all done flag. all lockres data received!\n");
ret = -ENOMEM;
@@ -41,9 +41,9 @@
Index: cert3/fs/ocfs2/dlm/dlmdebug.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmdebug.c 2006-04-18 14:34:25.239529000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmdebug.c 2006-04-19 12:15:45.604457000 -0700
-@@ -177,7 +177,7 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmdebug.c 2006-04-19 14:45:42.941193000 -0700
++++ cert3/fs/ocfs2/dlm/dlmdebug.c 2006-04-19 14:50:04.705827000 -0700
+@@ -177,7 +177,7 @@ static int dlm_dump_one_lock_resource(co
mlog(ML_ERROR, "user passed too little data: %d bytes\n", len);
goto leave;
}
@@ -52,7 +52,7 @@
if (!buf) {
mlog(ML_ERROR, "could not alloc %d bytes\n", len+1);
ret = -ENOMEM;
-@@ -436,7 +436,7 @@
+@@ -436,7 +436,7 @@ static int dlm_trigger_migration(const c
mlog(ML_ERROR, "user passed too little data: %d bytes\n", len);
goto leave;
}
@@ -63,9 +63,9 @@
ret = -ENOMEM;
Index: cert3/fs/ocfs2/dlm/dlmdomain.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmdomain.c 2006-04-19 12:15:34.858011000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmdomain.c 2006-04-19 12:15:45.616457000 -0700
-@@ -892,7 +892,7 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmdomain.c 2006-04-19 14:49:53.198691000 -0700
++++ cert3/fs/ocfs2/dlm/dlmdomain.c 2006-04-19 14:50:04.717828000 -0700
+@@ -892,7 +892,7 @@ static int dlm_try_to_join_domain(struct
mlog_entry("%p", dlm);
@@ -74,7 +74,7 @@
if (!ctxt) {
status = -ENOMEM;
mlog_errno(status);
-@@ -1187,13 +1187,13 @@
+@@ -1187,13 +1187,13 @@ static struct dlm_ctxt *dlm_alloc_ctxt(c
int i;
struct dlm_ctxt *dlm = NULL;
@@ -90,7 +90,7 @@
if (dlm->name == NULL) {
mlog_errno(-ENOMEM);
kfree(dlm);
-@@ -1201,7 +1201,7 @@
+@@ -1201,7 +1201,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(c
goto leave;
}
@@ -101,9 +101,9 @@
kfree(dlm->name);
Index: cert3/fs/ocfs2/dlm/dlmfs.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmfs.c 2006-04-18 14:34:25.550424000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmfs.c 2006-04-19 12:15:45.624457000 -0700
-@@ -116,7 +116,7 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmfs.c 2006-04-19 14:45:42.958193000 -0700
++++ cert3/fs/ocfs2/dlm/dlmfs.c 2006-04-19 14:50:04.729827000 -0700
+@@ -116,7 +116,7 @@ static int dlmfs_file_open(struct inode
* doesn't make sense for LVB writes. */
file->f_flags &= ~O_APPEND;
@@ -112,7 +112,7 @@
if (!fp) {
status = -ENOMEM;
goto bail;
-@@ -196,7 +196,7 @@
+@@ -196,7 +196,7 @@ static ssize_t dlmfs_file_read(struct fi
else
readlen = count - *ppos;
@@ -121,7 +121,7 @@
if (!lvb_buf)
return -ENOMEM;
-@@ -240,7 +240,7 @@
+@@ -240,7 +240,7 @@ static ssize_t dlmfs_file_write(struct f
else
writelen = count - *ppos;
@@ -132,9 +132,9 @@
Index: cert3/fs/ocfs2/dlm/dlmlock.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmlock.c 2006-04-19 12:15:26.909594000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmlock.c 2006-04-19 12:15:45.634457000 -0700
-@@ -408,13 +408,13 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmlock.c 2006-04-19 14:49:44.191589000 -0700
++++ cert3/fs/ocfs2/dlm/dlmlock.c 2006-04-19 14:50:04.744827000 -0700
+@@ -408,13 +408,13 @@ struct dlm_lock * dlm_new_lock(int type,
struct dlm_lock *lock;
int kernel_allocated = 0;
@@ -152,9 +152,9 @@
return NULL;
Index: cert3/fs/ocfs2/dlm/dlmmaster.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 12:15:42.752426000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 12:15:45.664457000 -0700
-@@ -707,11 +707,11 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:49:59.953437000 -0700
++++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:50:04.769827000 -0700
+@@ -707,11 +707,11 @@ struct dlm_lock_resource *dlm_new_lockre
{
struct dlm_lock_resource *res;
@@ -168,7 +168,7 @@
if (!res->lockname.name) {
kfree(res);
return NULL;
-@@ -774,7 +774,7 @@
+@@ -774,7 +774,7 @@ lookup:
mlog(0, "allocating a new resource\n");
/* nothing found and we need to allocate one. */
alloc_mle = (struct dlm_master_list_entry *)
@@ -177,7 +177,7 @@
if (!alloc_mle)
goto leave;
res = dlm_new_lockres(dlm, lockid, namelen);
-@@ -1595,7 +1595,7 @@
+@@ -1528,7 +1528,7 @@ way_up_top:
spin_unlock(&dlm->spinlock);
mle = (struct dlm_master_list_entry *)
@@ -186,7 +186,7 @@
if (!mle) {
response = DLM_MASTER_RESP_ERROR;
mlog_errno(-ENOMEM);
-@@ -2002,7 +2002,7 @@
+@@ -1935,7 +1935,7 @@ int dlm_dispatch_assert_master(struct dl
int ignore_higher, u8 request_from, u32 flags)
{
struct dlm_work_item *item;
@@ -195,7 +195,7 @@
if (!item)
return -ENOMEM;
-@@ -2238,14 +2238,14 @@
+@@ -2171,14 +2171,14 @@ int dlm_migrate_lockres(struct dlm_ctxt
*/
ret = -ENOMEM;
@@ -212,7 +212,7 @@
if (!mle) {
mlog_errno(ret);
goto leave;
-@@ -2699,7 +2699,7 @@
+@@ -2632,7 +2632,7 @@ int dlm_migrate_request_handler(struct o
/* preallocate.. if this fails, abort */
mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
@@ -223,9 +223,9 @@
ret = -ENOMEM;
Index: cert3/fs/ocfs2/dlm/userdlm.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/userdlm.c 2006-04-18 14:34:25.611422000 -0700
-+++ cert3/fs/ocfs2/dlm/userdlm.c 2006-04-19 12:15:45.673457000 -0700
-@@ -672,7 +672,7 @@
+--- cert3.orig/fs/ocfs2/dlm/userdlm.c 2006-04-19 14:45:43.125110000 -0700
++++ cert3/fs/ocfs2/dlm/userdlm.c 2006-04-19 14:50:04.793827000 -0700
+@@ -672,7 +672,7 @@ struct dlm_ctxt *user_dlm_register_conte
u32 dlm_key;
char *domain;
Added: branches/ocfs2-1.2-cert/patches/dlm-restart-mastery-debug-msgs.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-restart-mastery-debug-msgs.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-restart-mastery-debug-msgs.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -0,0 +1,39 @@
+Index: cert3/fs/ocfs2/dlm/dlmmaster.c
+===================================================================
+--- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:49:17.405067000 -0700
++++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:49:19.621317000 -0700
+@@ -927,7 +927,7 @@ wait:
+ ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
+ if (ret < 0) {
+ wait_on_recovery = 1;
+- mlog(0, "%s:%.*s: node map changed, redo the "
++ mlog(ML_NOTICE, "%s:%.*s: node map changed, redo the "
+ "master request now, blocked=%d\n",
+ dlm->name, res->lockname.len,
+ res->lockname.name, blocked);
+@@ -1216,14 +1216,14 @@ static int dlm_restart_lock_mastery(stru
+ clear_bit(node, mle->maybe_map);
+
+ if (node == lowest) {
+- mlog(0, "expected master %u died"
++ mlog(ML_ERROR, "expected master %u died"
+ " while this node was blocked "
+ "waiting on it!\n", node);
+ lowest = find_next_bit(mle->maybe_map,
+ O2NM_MAX_NODES,
+ lowest+1);
+ if (lowest < O2NM_MAX_NODES) {
+- mlog(0, "%s:%.*s:still "
++ mlog(ML_NOTICE, "%s:%.*s:still "
+ "blocked. waiting on %u "
+ "now\n", dlm->name,
+ res->lockname.len,
+@@ -1238,7 +1238,7 @@ static int dlm_restart_lock_mastery(stru
+ * dlm_do_local_recovery_cleanup
+ * has already run, so the mle
+ * refcount is ok */
+- mlog(0, "%s:%.*s: no "
++ mlog(ML_NOTICE, "%s:%.*s: no "
+ "longer blocking. try to "
+ "master this here\n",
+ dlm->name,
Added: branches/ocfs2-1.2-cert/patches/dlm-wait-for-recovery-msgs.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-wait-for-recovery-msgs.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/dlm-wait-for-recovery-msgs.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -0,0 +1,20 @@
+Index: cert3/fs/ocfs2/dlm/dlmrecovery.c
+===================================================================
+--- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:49:59.980437000 -0700
++++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:50:02.499513000 -0700
+@@ -377,13 +377,13 @@ int dlm_wait_for_node_death(struct dlm_c
+ int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
+ {
+ if (timeout) {
+- mlog(0, "%s: waiting %dms for notification of "
++ mlog(ML_NOTICE, "%s: waiting %dms for notification of "
+ "recovery of node %u\n", dlm->name, timeout, node);
+ wait_event_timeout(dlm->dlm_reco_thread_wq,
+ dlm_is_node_recovered(dlm, node),
+ msecs_to_jiffies(timeout));
+ } else {
+- mlog(0, "%s: waiting indefinitely for notification "
++ mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
+ "of recovery of node %u\n", dlm->name, node);
+ wait_event(dlm->dlm_reco_thread_wq,
+ dlm_is_node_recovered(dlm, node));
Modified: branches/ocfs2-1.2-cert/patches/em-silence-eexist.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/em-silence-eexist.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/em-silence-eexist.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,7 +1,7 @@
Index: fs/ocfs2/extent_map.c
===================================================================
---- fs/ocfs2/extent_map.c (revision 2787)
-+++ fs/ocfs2/extent_map.c (working copy)
+--- fs/ocfs2/extent_map.c.orig 2006-04-19 14:47:35.739805000 -0700
++++ fs/ocfs2/extent_map.c 2006-04-19 14:48:04.274654000 -0700
@@ -296,7 +296,7 @@ static int ocfs2_extent_map_find_leaf(st
ret = ocfs2_extent_map_insert(inode, rec,
Added: branches/ocfs2-1.2-cert/patches/enomem-on-other-reco-node-warning.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/enomem-on-other-reco-node-warning.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/enomem-on-other-reco-node-warning.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -0,0 +1,13 @@
+Index: cert3/fs/ocfs2/dlm/dlmrecovery.c
+===================================================================
+--- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:49:46.581623000 -0700
++++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:49:48.801979000 -0700
+@@ -577,7 +577,7 @@ static int dlm_remaster_locks(struct dlm
+ "yes" : "no");
+ } else {
+ /* -ENOMEM on the other node */
+- mlog(0, "%s: node %u returned "
++ mlog(ML_NOTICE, "%s: node %u returned "
+ "%d during recovery, retrying "
+ "after a short wait\n",
+ dlm->name, ndata->node_num,
Modified: branches/ocfs2-1.2-cert/patches/fix-death-during-recovery.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/fix-death-during-recovery.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/fix-death-during-recovery.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmrecovery.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 12:15:26.937585000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 12:15:29.550625000 -0700
-@@ -502,6 +502,7 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:49:44.217579000 -0700
++++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:49:46.581623000 -0700
+@@ -502,6 +502,7 @@ master_here:
status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
if (status < 0) {
@@ -10,7 +10,7 @@
mlog(ML_ERROR, "error %d remastering locks for node %u, "
"retrying.\n", status, dlm->reco.dead_node);
/* yield a bit to allow any final network messages
-@@ -529,9 +530,16 @@
+@@ -529,9 +530,16 @@ static int dlm_remaster_locks(struct dlm
int pass = 0;
unsigned long long mlg;
@@ -30,7 +30,7 @@
/* safe to access the node data list without a lock, since this
* process is the only one to change the list */
-@@ -548,16 +556,36 @@
+@@ -548,16 +556,36 @@ static int dlm_remaster_locks(struct dlm
continue;
}
@@ -63,7 +63,7 @@
+ "yes" : "no");
+ } else {
+ /* -ENOMEM on the other node */
-+ mlog(ML_NOTICE, "%s: node %u returned "
++ mlog(0, "%s: node %u returned "
+ "%d during recovery, retrying "
+ "after a short wait\n",
+ dlm->name, ndata->node_num,
@@ -76,7 +76,7 @@
switch (ndata->state) {
case DLM_RECO_NODE_DATA_INIT:
-@@ -569,10 +597,9 @@
+@@ -569,10 +597,9 @@ static int dlm_remaster_locks(struct dlm
mlog(ML_ERROR, "%s:node %u died after "
"requesting recovery info for node %u\n",
dlm->name, ndata->node_num, dead_node);
@@ -90,7 +90,7 @@
case DLM_RECO_NODE_DATA_REQUESTING:
ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
mlog(0, "now receiving recovery data from "
-@@ -617,28 +644,12 @@
+@@ -617,28 +644,12 @@ static int dlm_remaster_locks(struct dlm
BUG();
break;
case DLM_RECO_NODE_DATA_DEAD:
@@ -121,7 +121,7 @@
case DLM_RECO_NODE_DATA_RECEIVING:
case DLM_RECO_NODE_DATA_REQUESTED:
if (pass % 1000)
-@@ -689,7 +700,7 @@
+@@ -689,7 +700,7 @@ static int dlm_remaster_locks(struct dlm
jiffies, dlm->reco.dead_node,
dlm->node_num, dlm->reco.new_master);
destroy = 1;
@@ -130,7 +130,7 @@
/* rescan everything marked dirty along the way */
dlm_kick_thread(dlm, NULL);
break;
-@@ -702,7 +713,6 @@
+@@ -702,7 +713,6 @@ static int dlm_remaster_locks(struct dlm
}
@@ -138,7 +138,7 @@
if (destroy)
dlm_destroy_recovery_area(dlm, dead_node);
-@@ -861,24 +871,22 @@
+@@ -861,24 +871,22 @@ static void dlm_request_all_locks_worker
if (dead_node != dlm->reco.dead_node ||
reco_master != dlm->reco.new_master) {
@@ -178,7 +178,7 @@
/* lock resources should have already been moved to the
* dlm->reco.resources list. now move items from that list
-@@ -920,7 +928,7 @@
+@@ -920,7 +928,7 @@ static void dlm_request_all_locks_worker
reco_master, dead_node);
}
}
Modified: branches/ocfs2-1.2-cert/patches/fix-dlmlock_remote.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/fix-dlmlock_remote.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/fix-dlmlock_remote.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmlock.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmlock.c 2006-04-19 12:15:06.832054000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmlock.c 2006-04-19 12:15:17.261501000 -0700
-@@ -200,6 +200,7 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmlock.c 2006-04-19 14:49:15.027927000 -0700
++++ cert3/fs/ocfs2/dlm/dlmlock.c 2006-04-19 14:49:28.722419000 -0700
+@@ -200,6 +200,7 @@ static enum dlm_status dlmlock_remote(st
struct dlm_lock *lock, int flags)
{
enum dlm_status status = DLM_DENIED;
@@ -10,7 +10,7 @@
mlog_entry("type=%d\n", lock->ml.type);
mlog(0, "lockres %.*s, flags = 0x%x\n", res->lockname.len,
-@@ -229,6 +230,10 @@
+@@ -229,6 +230,10 @@ static enum dlm_status dlmlock_remote(st
dlm_error(status);
dlm_revert_pending_lock(res, lock);
dlm_lock_put(lock);
@@ -21,7 +21,7 @@
} else if (dlm_is_recovery_lock(res->lockname.name,
res->lockname.len)) {
/* special case for the $RECOVERY lock.
-@@ -243,7 +248,8 @@
+@@ -243,7 +248,8 @@ static enum dlm_status dlmlock_remote(st
}
spin_unlock(&res->spinlock);
Modified: branches/ocfs2-1.2-cert/patches/fix-purge-lockres.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/fix-purge-lockres.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/fix-purge-lockres.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: fs/ocfs2/dlm/dlmthread.c
===================================================================
---- fs/ocfs2/dlm/dlmthread.c.orig 2006-04-19 11:13:12.516915000 -0700
-+++ fs/ocfs2/dlm/dlmthread.c 2006-04-19 12:14:59.181366000 -0700
-@@ -57,6 +57,8 @@
+--- fs/ocfs2/dlm/dlmthread.c.orig 2006-04-19 14:48:55.714045000 -0700
++++ fs/ocfs2/dlm/dlmthread.c 2006-04-19 14:49:06.681503000 -0700
+@@ -57,6 +57,8 @@ extern spinlock_t dlm_domain_lock;
extern struct list_head dlm_domains;
static int dlm_thread(void *data);
@@ -11,7 +11,7 @@
#define dlm_lock_is_remote(dlm, lock) ((lock)->ml.node != (dlm)->node_num)
-@@ -112,10 +114,23 @@
+@@ -112,10 +114,23 @@ void __dlm_lockres_calc_usage(struct dlm
res->last_used = jiffies;
list_add_tail(&res->purge, &dlm->purge_list);
dlm->purge_count++;
@@ -37,7 +37,7 @@
list_del_init(&res->purge);
dlm->purge_count--;
-@@ -181,6 +196,24 @@
+@@ -181,6 +196,24 @@ finish:
__dlm_unhash_lockres(lockres);
}
Modified: branches/ocfs2-1.2-cert/patches/fix-recovery-spin.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/fix-recovery-spin.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/fix-recovery-spin.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,17 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmmaster.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 12:15:09.395085000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 12:15:14.671470000 -0700
-@@ -883,7 +883,7 @@
- }
-
- dlm_kick_recovery_thread(dlm);
-- msleep(100);
-+ msleep(1000);
- dlm_wait_for_recovery(dlm);
-
- spin_lock(&dlm->spinlock);
-@@ -2112,6 +2112,7 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:49:19.621317000 -0700
++++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:49:24.001029000 -0700
+@@ -2045,6 +2045,7 @@ static int dlm_pre_master_reco_lockres(s
BUG();
/* host is down, so answer for that node would be
* DLM_LOCK_RES_OWNER_UNKNOWN. continue. */
Modified: branches/ocfs2-1.2-cert/patches/fix-remote-lock-during-reco.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/fix-remote-lock-during-reco.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/fix-remote-lock-during-reco.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmlock.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmlock.c 2006-04-19 12:15:21.973161000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmlock.c 2006-04-19 12:15:26.909594000 -0700
-@@ -226,7 +226,16 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmlock.c 2006-04-19 14:49:33.207124000 -0700
++++ cert3/fs/ocfs2/dlm/dlmlock.c 2006-04-19 14:49:41.995233000 -0700
+@@ -226,7 +226,16 @@ static enum dlm_status dlmlock_remote(st
res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
lock->lock_pending = 0;
if (status != DLM_NORMAL) {
@@ -13,14 +13,14 @@
+ /* recovery lock was mastered by dead node.
+ * we need to have calc_usage shoot down this
+ * lockres and completely remaster it. */
-+ mlog(ML_NOTICE, "%s: recovery lock was owned by "
++ mlog(0, "%s: recovery lock was owned by "
+ "dead node %u, remaster it now.\n",
+ dlm->name, res->owner);
+ } else if (status != DLM_NOTQUEUED) {
/*
* DO NOT call calc_usage, as this would unhash
* the remote lockres before we ever get to use
-@@ -691,18 +700,22 @@
+@@ -691,18 +700,22 @@ retry_lock:
msleep(100);
/* no waiting for dlm_reco_thread */
if (recovery) {
@@ -54,14 +54,14 @@
if (status != DLM_NORMAL) {
Index: cert3/fs/ocfs2/dlm/dlmrecovery.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 12:15:04.382700000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 12:15:26.937585000 -0700
-@@ -2425,6 +2425,10 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:49:12.946571000 -0700
++++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:49:42.033233000 -0700
+@@ -2424,6 +2424,10 @@ again:
mlog(ML_NOTICE, "%s: reco master %u is ready to recover %u\n",
dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
status = -EEXIST;
+ } else if (ret == DLM_RECOVERING) {
-+ mlog(ML_NOTICE, "dlm=%s dlmlock says master node died (this=%u)\n",
++ mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
+ dlm->name, dlm->node_num);
+ goto again;
} else {
Added: branches/ocfs2-1.2-cert/patches/fix-trailing-whitespace-1.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/fix-trailing-whitespace-1.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/fix-trailing-whitespace-1.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -0,0 +1,13 @@
+Index: cert3/fs/ocfs2/journal.c
+===================================================================
+--- cert3.orig/fs/ocfs2/journal.c 2006-04-19 14:50:14.000000000 -0700
++++ cert3/fs/ocfs2/journal.c 2006-04-19 14:53:29.109139000 -0700
+@@ -1063,7 +1063,7 @@ restart:
+ ocfs2_super_unlock(osb, 1);
+
+ mlog(ML_NOTICE, "Complete recovery pass on volume %s\n", osb->uuid_str);
+-
++
+ /* We always run recovery on our own orphan dir - the dead
+ * node(s) may have voted "no" on an inode delete earlier. A
+ * revote is therefore required. */
Added: branches/ocfs2-1.2-cert/patches/fix-wait-for-recovery.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/fix-wait-for-recovery.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/fix-wait-for-recovery.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -0,0 +1,28 @@
+Index: cert3/fs/ocfs2/dlm/dlmrecovery.c
+===================================================================
+--- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:50:07.611861000 -0700
++++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:50:21.250657000 -0700
+@@ -492,22 +492,8 @@ void dlm_wait_for_recovery(struct dlm_ct
+ dlm->name, dlm->dlm_reco_thread_task->pid,
+ dlm->reco.state, dlm->reco.new_master,
+ dlm->reco.dead_node);
+- //dlm_print_reco_junk(dlm);
+ }
+-
+- while (1) {
+- if (wait_event_timeout(dlm->reco.event,
+- !dlm_in_recovery(dlm),
+- msecs_to_jiffies(5000)))
+- break;
+- mlog(ML_NOTICE, "%s: reco thread %d still in recovery: "
+- "state=%d, master=%u, dead=%u\n",
+- dlm->name, dlm->dlm_reco_thread_task->pid,
+- dlm->reco.state, dlm->reco.new_master,
+- dlm->reco.dead_node);
+- //dlm_print_reco_junk(dlm);
+- }
+- // wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
++ wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
+ }
+
+ static void dlm_begin_recovery(struct dlm_ctxt *dlm)
Modified: branches/ocfs2-1.2-cert/patches/fix-wait-in-mastery.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/fix-wait-in-mastery.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/fix-wait-in-mastery.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmcommon.h
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmcommon.h 2006-04-19 12:15:11.964393000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmcommon.h 2006-04-19 12:15:42.733426000 -0700
-@@ -700,6 +700,7 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmcommon.h 2006-04-19 14:49:21.724673000 -0700
++++ cert3/fs/ocfs2/dlm/dlmcommon.h 2006-04-19 14:49:59.930437000 -0700
+@@ -700,6 +700,7 @@ void dlm_wait_for_recovery(struct dlm_ct
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node);
int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
@@ -12,9 +12,9 @@
void dlm_put(struct dlm_ctxt *dlm);
Index: cert3/fs/ocfs2/dlm/dlmmaster.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 12:15:24.584240000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 12:15:42.752426000 -0700
-@@ -896,6 +896,9 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:49:39.793877000 -0700
++++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:49:59.953437000 -0700
+@@ -896,6 +896,9 @@ redo_request:
} else
wait_on_recovery = 0;
spin_unlock(&dlm->spinlock);
@@ -26,9 +26,9 @@
/* must wait for lock to be mastered elsewhere */
Index: cert3/fs/ocfs2/dlm/dlmrecovery.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 12:15:40.321072000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 12:15:42.773426000 -0700
-@@ -344,6 +344,18 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:49:57.748081000 -0700
++++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:49:59.980437000 -0700
+@@ -344,6 +344,18 @@ int dlm_is_node_dead(struct dlm_ctxt *dl
return dead;
}
@@ -47,20 +47,20 @@
int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
{
if (timeout) {
-@@ -362,6 +374,24 @@
+@@ -362,6 +374,24 @@ int dlm_wait_for_node_death(struct dlm_c
return 0;
}
+int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
+{
+ if (timeout) {
-+ mlog(ML_NOTICE, "%s: waiting %dms for notification of "
++ mlog(0, "%s: waiting %dms for notification of "
+ "recovery of node %u\n", dlm->name, timeout, node);
+ wait_event_timeout(dlm->dlm_reco_thread_wq,
+ dlm_is_node_recovered(dlm, node),
+ msecs_to_jiffies(timeout));
+ } else {
-+ mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
++ mlog(0, "%s: waiting indefinitely for notification "
+ "of recovery of node %u\n", dlm->name, node);
+ wait_event(dlm->dlm_reco_thread_wq,
+ dlm_is_node_recovered(dlm, node));
Modified: branches/ocfs2-1.2-cert/patches/hb-add_tracking_around_configured_nodes
===================================================================
--- branches/ocfs2-1.2-cert/patches/hb-add_tracking_around_configured_nodes 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/hb-add_tracking_around_configured_nodes 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/cluster/heartbeat.c
===================================================================
---- cert3.orig/fs/ocfs2/cluster/heartbeat.c 2006-04-19 12:15:19.641855000 -0700
-+++ cert3/fs/ocfs2/cluster/heartbeat.c 2006-04-19 12:15:51.643519000 -0700
-@@ -931,12 +931,14 @@
+--- cert3.orig/fs/ocfs2/cluster/heartbeat.c 2006-04-19 14:49:30.912775000 -0700
++++ cert3/fs/ocfs2/cluster/heartbeat.c 2006-04-19 14:50:12.207573000 -0700
+@@ -931,12 +931,14 @@ static int o2hb_do_disk_heartbeat(struct
struct o2hb_bio_wait_ctxt write_wc;
struct timeval start;
Modified: branches/ocfs2-1.2-cert/patches/hold-dirty-ref.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/hold-dirty-ref.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/hold-dirty-ref.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmcommon.h
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmcommon.h 2006-04-19 11:31:21.585283000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmcommon.h 2006-04-19 12:15:11.964393000 -0700
-@@ -219,18 +219,26 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmcommon.h 2006-04-19 14:49:00.178756000 -0700
++++ cert3/fs/ocfs2/dlm/dlmcommon.h 2006-04-19 14:49:21.724673000 -0700
+@@ -219,18 +219,26 @@ struct dlm_lock_resource
struct hlist_node hash_node;
struct kref refs;
@@ -34,9 +34,9 @@
unsigned migration_pending:1;
Index: cert3/fs/ocfs2/dlm/dlmthread.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmthread.c 2006-04-19 12:14:59.181366000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmthread.c 2006-04-19 12:15:11.974383000 -0700
-@@ -466,6 +466,8 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmthread.c 2006-04-19 14:49:06.681503000 -0700
++++ cert3/fs/ocfs2/dlm/dlmthread.c 2006-04-19 14:49:21.739673000 -0700
+@@ -466,6 +466,8 @@ void __dlm_dirty_lockres(struct dlm_ctxt
/* don't shuffle secondary queues */
if ((res->owner == dlm->node_num) &&
!(res->state & DLM_LOCK_RES_DIRTY)) {
@@ -45,7 +45,7 @@
list_add_tail(&res->dirty, &dlm->dirty_list);
res->state |= DLM_LOCK_RES_DIRTY;
}
-@@ -657,6 +659,8 @@
+@@ -657,6 +659,8 @@ static int dlm_thread(void *data)
list_del_init(&res->dirty);
spin_unlock(&res->spinlock);
spin_unlock(&dlm->spinlock);
@@ -54,7 +54,7 @@
/* lockres can be re-dirtied/re-added to the
* dirty_list in this gap, but that is ok */
-@@ -709,6 +713,8 @@
+@@ -709,6 +713,8 @@ in_progress:
/* if the lock was in-progress, stick
* it on the back of the list */
if (delay) {
Modified: branches/ocfs2-1.2-cert/patches/hold-recovery-ref.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/hold-recovery-ref.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/hold-recovery-ref.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmrecovery.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-18 14:42:34.672969000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-18 14:51:31.846247000 -0700
-@@ -1782,8 +1782,14 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:48:21.966993000 -0700
++++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:48:55.689045000 -0700
+@@ -1782,8 +1782,14 @@ void dlm_move_lockres_to_recovery_list(s
struct dlm_lock *lock;
res->state |= DLM_LOCK_RES_RECOVERING;
@@ -18,7 +18,7 @@
list_add_tail(&res->recovering, &dlm->reco.resources);
/* find any pending locks and put them back on proper list */
-@@ -1872,9 +1878,11 @@
+@@ -1872,9 +1878,11 @@ static void dlm_finish_local_lockres_rec
spin_lock(&res->spinlock);
dlm_change_lockres_owner(dlm, res, new_master);
res->state &= ~DLM_LOCK_RES_RECOVERING;
@@ -31,7 +31,7 @@
}
}
-@@ -1907,11 +1915,13 @@
+@@ -1907,11 +1915,13 @@ static void dlm_finish_local_lockres_rec
dlm->name, res->lockname.len,
res->lockname.name, res->owner);
list_del_init(&res->recovering);
@@ -48,9 +48,9 @@
}
Index: cert3/fs/ocfs2/dlm/dlmcommon.h
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmcommon.h 2006-04-18 14:42:32.530754000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmcommon.h 2006-04-18 14:50:05.573065000 -0700
-@@ -858,6 +858,7 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmcommon.h 2006-04-19 14:48:11.476213000 -0700
++++ cert3/fs/ocfs2/dlm/dlmcommon.h 2006-04-19 14:48:55.700045000 -0700
+@@ -858,6 +858,7 @@ int dlm_lock_basts_flushed(struct dlm_ct
int dlm_dump_all_mles(const char __user *data, unsigned int len);
@@ -60,9 +60,9 @@
static inline const char * dlm_lock_mode_name(int mode)
Index: cert3/fs/ocfs2/dlm/dlmthread.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmthread.c 2006-04-18 14:42:30.992252000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmthread.c 2006-04-18 14:50:05.590064000 -0700
-@@ -82,7 +82,7 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmthread.c 2006-04-19 14:48:06.522823000 -0700
++++ cert3/fs/ocfs2/dlm/dlmthread.c 2006-04-19 14:48:55.714045000 -0700
+@@ -82,7 +82,7 @@ repeat:
}
Modified: branches/ocfs2-1.2-cert/patches/jrnl-change_gfp_kernel_to_nofs
===================================================================
--- branches/ocfs2-1.2-cert/patches/jrnl-change_gfp_kernel_to_nofs 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/jrnl-change_gfp_kernel_to_nofs 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/extent_map.c
===================================================================
---- cert3.orig/fs/ocfs2/extent_map.c 2006-04-19 11:12:51.669531000 -0700
-+++ cert3/fs/ocfs2/extent_map.c 2006-04-19 12:15:54.251568000 -0700
-@@ -581,7 +581,7 @@
+--- cert3.orig/fs/ocfs2/extent_map.c 2006-04-19 14:48:04.274654000 -0700
++++ cert3/fs/ocfs2/extent_map.c 2006-04-19 14:50:14.247909000 -0700
+@@ -581,7 +581,7 @@ int ocfs2_extent_map_insert(struct inode
ret = -ENOMEM;
ctxt.new_ent = kmem_cache_alloc(ocfs2_em_ent_cachep,
@@ -11,7 +11,7 @@
if (!ctxt.new_ent) {
mlog_errno(ret);
return ret;
-@@ -595,14 +595,14 @@
+@@ -595,14 +595,14 @@ int ocfs2_extent_map_insert(struct inode
if (ctxt.need_left && !ctxt.left_ent) {
ctxt.left_ent =
kmem_cache_alloc(ocfs2_em_ent_cachep,
@@ -30,9 +30,9 @@
}
Index: cert3/fs/ocfs2/journal.c
===================================================================
---- cert3.orig/fs/ocfs2/journal.c 2006-04-19 11:13:07.079645000 -0700
-+++ cert3/fs/ocfs2/journal.c 2006-04-19 12:15:54.263556000 -0700
-@@ -117,7 +117,7 @@
+--- cert3.orig/fs/ocfs2/journal.c 2006-04-19 14:48:47.016943000 -0700
++++ cert3/fs/ocfs2/journal.c 2006-04-19 14:50:14.263893000 -0700
+@@ -117,7 +117,7 @@ struct ocfs2_journal_handle *ocfs2_alloc
{
struct ocfs2_journal_handle *retval = NULL;
@@ -41,7 +41,7 @@
if (!retval) {
mlog(ML_ERROR, "Failed to allocate memory for journal "
"handle!\n");
-@@ -972,7 +972,7 @@
+@@ -972,7 +972,7 @@ static void ocfs2_queue_recovery_complet
{
struct ocfs2_la_recovery_item *item;
@@ -52,9 +52,9 @@
* skipping local alloc cleanup as fsck.ocfs2 is more
Index: cert3/fs/ocfs2/cluster/nodemanager.c
===================================================================
---- cert3.orig/fs/ocfs2/cluster/nodemanager.c 2006-04-18 14:34:22.759388000 -0700
-+++ cert3/fs/ocfs2/cluster/nodemanager.c 2006-04-19 12:15:54.285550000 -0700
-@@ -550,7 +550,7 @@
+--- cert3.orig/fs/ocfs2/cluster/nodemanager.c 2006-04-19 14:45:36.782124000 -0700
++++ cert3/fs/ocfs2/cluster/nodemanager.c 2006-04-19 14:50:14.282874000 -0700
+@@ -550,7 +550,7 @@ static struct config_item *o2nm_node_gro
if (strlen(name) > O2NM_MAX_NAME_LEN)
goto out; /* ENAMETOOLONG */
@@ -63,7 +63,7 @@
if (node == NULL)
goto out; /* ENOMEM */
-@@ -658,9 +658,9 @@
+@@ -658,9 +658,9 @@ static struct config_group *o2nm_cluster
if (o2nm_single_cluster)
goto out; /* ENOSPC */
@@ -78,9 +78,9 @@
goto out;
Index: cert3/fs/ocfs2/cluster/heartbeat.c
===================================================================
---- cert3.orig/fs/ocfs2/cluster/heartbeat.c 2006-04-19 12:15:51.643519000 -0700
-+++ cert3/fs/ocfs2/cluster/heartbeat.c 2006-04-19 12:15:54.300550000 -0700
-@@ -481,7 +481,7 @@
+--- cert3.orig/fs/ocfs2/cluster/heartbeat.c 2006-04-19 14:50:12.207573000 -0700
++++ cert3/fs/ocfs2/cluster/heartbeat.c 2006-04-19 14:50:14.306850000 -0700
+@@ -481,7 +481,7 @@ static int o2hb_read_slots(struct o2hb_r
o2hb_compute_request_limits(reg, max_slots, &num_bios, &slots_per_bio);
o2hb_mlog_blocking(reg, &start, "allocating bios for read");
@@ -89,7 +89,7 @@
o2hb_mlog_blocking_done(reg, &start);
if (!bios) {
status = -ENOMEM;
-@@ -1339,14 +1339,14 @@
+@@ -1339,14 +1339,14 @@ static int o2hb_map_slot_data(struct o2h
char *raw;
struct o2hb_disk_slot *slot;
@@ -106,7 +106,7 @@
if (reg->hr_slots == NULL) {
mlog_errno(-ENOMEM);
return -ENOMEM;
-@@ -1365,14 +1365,14 @@
+@@ -1365,14 +1365,14 @@ static int o2hb_map_slot_data(struct o2h
reg->hr_num_pages, reg->hr_blocks, spp);
reg->hr_slot_data = kcalloc(reg->hr_num_pages, sizeof(struct page *),
@@ -123,7 +123,7 @@
if (!page) {
mlog_errno(-ENOMEM);
return -ENOMEM;
-@@ -1662,7 +1662,7 @@
+@@ -1662,7 +1662,7 @@ static struct config_item *o2hb_heartbea
struct o2hb_region *reg = NULL;
struct config_item *ret = NULL;
@@ -132,7 +132,7 @@
if (reg == NULL)
goto out; /* ENOMEM */
-@@ -1711,7 +1711,7 @@
+@@ -1711,7 +1711,7 @@ struct config_group *o2hb_alloc_hb_set(v
struct o2hb_heartbeat_group *hs = NULL;
struct config_group *ret = NULL;
@@ -143,9 +143,9 @@
Index: cert3/fs/ocfs2/cluster/net_proc.c
===================================================================
---- cert3.orig/fs/ocfs2/cluster/net_proc.c 2006-04-19 12:15:32.233677000 -0700
-+++ cert3/fs/ocfs2/cluster/net_proc.c 2006-04-19 12:15:54.307550000 -0700
-@@ -156,7 +156,7 @@
+--- cert3.orig/fs/ocfs2/cluster/net_proc.c 2006-04-19 14:49:50.924335000 -0700
++++ cert3/fs/ocfs2/cluster/net_proc.c 2006-04-19 14:50:14.314842000 -0700
+@@ -156,7 +156,7 @@ static int nst_fop_open(struct inode *in
struct seq_file *seq;
int ret;
@@ -154,7 +154,7 @@
if (dummy_nst == NULL) {
ret = -ENOMEM;
goto out;
-@@ -339,7 +339,7 @@
+@@ -339,7 +339,7 @@ static int sc_fop_open(struct inode *ino
struct seq_file *seq;
int ret;
@@ -165,9 +165,9 @@
goto out;
Index: cert3/fs/ocfs2/vote.c
===================================================================
---- cert3.orig/fs/ocfs2/vote.c 2006-04-18 14:34:22.794388000 -0700
-+++ cert3/fs/ocfs2/vote.c 2006-04-19 12:15:54.318550000 -0700
-@@ -598,7 +598,7 @@
+--- cert3.orig/fs/ocfs2/vote.c 2006-04-19 14:45:36.908123000 -0700
++++ cert3/fs/ocfs2/vote.c 2006-04-19 14:50:14.330826000 -0700
+@@ -598,7 +598,7 @@ static struct ocfs2_net_wait_ctxt *ocfs2
{
struct ocfs2_net_wait_ctxt *w;
@@ -176,7 +176,7 @@
if (!w) {
mlog_errno(-ENOMEM);
goto bail;
-@@ -761,7 +761,7 @@
+@@ -761,7 +761,7 @@ static struct ocfs2_vote_msg * ocfs2_new
BUG_ON(!ocfs2_is_valid_vote_request(type));
@@ -185,7 +185,7 @@
if (!request) {
mlog_errno(-ENOMEM);
} else {
-@@ -1139,7 +1139,7 @@
+@@ -1139,7 +1139,7 @@ static int ocfs2_handle_vote_message(str
struct ocfs2_super *osb = data;
struct ocfs2_vote_work *work;
Modified: branches/ocfs2-1.2-cert/patches/lockres-release-info.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/lockres-release-info.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/lockres-release-info.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmmaster.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-18 14:42:38.907676000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-18 14:47:43.682671000 -0700
-@@ -613,6 +613,28 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:48:39.109519000 -0700
++++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:48:51.523333000 -0700
+@@ -613,6 +613,28 @@ static void dlm_lockres_release(struct k
mlog(0, "destroying lockres %.*s\n", res->lockname.len,
res->lockname.name);
Modified: branches/ocfs2-1.2-cert/patches/lvb-recovery-fix.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/lvb-recovery-fix.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/lvb-recovery-fix.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,75 +1,8 @@
Index: fs/ocfs2/dlm/dlmrecovery.c
===================================================================
---- fs/ocfs2/dlm/dlmrecovery.c.orig 2006-04-19 12:14:04.901052000 -0700
-+++ fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 12:15:04.382700000 -0700
-@@ -1604,6 +1604,66 @@
- * TODO: do MIGRATING and RECOVERING spinning
- */
-
-+#define DLM_OCFS2_SEC_SHIFT (64 - 34)
-+#define DLM_OCFS2_NSEC_MASK ((1ULL << DLM_OCFS2_SEC_SHIFT) - 1)
-+
-+struct floo {
-+ __be32 lvb_old_seq;
-+ __be32 lvb_version;
-+ __be32 lvb_iclusters;
-+ __be32 lvb_iuid;
-+ __be32 lvb_igid;
-+ __be16 lvb_imode;
-+ __be16 lvb_inlink;
-+ __be64 lvb_iatime_packed;
-+ __be64 lvb_ictime_packed;
-+ __be64 lvb_imtime_packed;
-+ __be64 lvb_isize;
-+ __be32 lvb_reserved[2];
-+};
-+
-+// OLDSEQ-- VERSION- CLUSTERS UIUD---- IGID---- MODE NLNK ATIMEPACKED----- CTIMEPACKED----- MTIMEPACKED----- ISIZE----------- RESERVED--------
-+// 00000000 00000001 00000001 0000c09f 00000262 41ff 0006 10f45a50844bfa5b 110885ed11acf024 110885ed11acf024 0000000000003000 0000000000000000
-+static inline void dlm_print_ocfs2_lvb(unsigned char *lvb)
-+{
-+ struct floo *raw = (struct floo *)lvb;
-+ u32 clusters, uid, gid, oldseq, vers;
-+ u16 mode, nlink;
-+ u64 isize, atime, mtime, ctime;
-+ /* just do some lame decoding, doesn't need to be too
-+ * accurate, just cut the encoded value into smaller values */
-+
-+
-+ oldseq = be32_to_cpu(raw->lvb_old_seq);
-+ vers = be32_to_cpu(raw->lvb_version);
-+ clusters= be32_to_cpu(raw->lvb_iclusters);
-+ isize = be64_to_cpu(raw->lvb_isize);
-+ uid = be32_to_cpu(raw->lvb_iuid);
-+ gid = be32_to_cpu(raw->lvb_igid);
-+ mode = be16_to_cpu(raw->lvb_imode);
-+ nlink = be16_to_cpu(raw->lvb_inlink);
-+ /* just print out the tv_sec portion */
-+ atime = be64_to_cpu(raw->lvb_iatime_packed) >> DLM_OCFS2_SEC_SHIFT;
-+ mtime = be64_to_cpu(raw->lvb_imtime_packed) >> DLM_OCFS2_SEC_SHIFT;
-+ ctime = be64_to_cpu(raw->lvb_ictime_packed) >> DLM_OCFS2_SEC_SHIFT;
-+ printk("[%u:%u:%u:%llu:%u:%u:%u:%u:%llu:%llu:%llu]", oldseq, vers,
-+ clusters, (unsigned long long)isize, uid, gid, mode,
-+ nlink, (unsigned long long)atime,
-+ (unsigned long long)mtime, (unsigned long long)ctime);
-+}
-+
-+static inline void dlm_print_lvb(unsigned char *lvb)
-+{
-+#if 0
-+ int i;
-+ for (i=0; i<DLM_LVB_LEN; i++)
-+ printk("%02x", (unsigned char)lvb[i]);
-+#endif
-+
-+ dlm_print_ocfs2_lvb(lvb);
-+}
-+
-+
- /*
- * NOTE about in-flight requests during migration:
- *
-@@ -1706,40 +1766,50 @@
+--- fs/ocfs2/dlm/dlmrecovery.c.orig 2006-04-19 14:49:04.481168000 -0700
++++ fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:49:10.803215000 -0700
+@@ -1706,40 +1706,48 @@ static int dlm_process_recovery_data(str
}
lksb->flags |= (ml->flags &
(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
@@ -111,24 +44,22 @@
- printk("]\n");
- dlm_print_one_lock_resource(res);
- BUG();
-+ (ml->type == LKM_EXMODE ||
-+ memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
-+ u64 c = be64_to_cpu(lock->ml.cookie);
-+ mlog(ML_ERROR, "%s:%.*s: received bad "
-+ "lvb! type=%d, convtype=%d, "
-+ "node=%u, cookie=%u:%llu\n",
-+ dlm->name, res->lockname.len,
-+ res->lockname.name, ml->type,
-+ ml->convert_type, ml->node,
-+ dlm_get_lock_cookie_node(c),
-+ dlm_get_lock_cookie_seq(c));
-+ printk("lockres lvb=[");
-+ dlm_print_lvb(res->lvb);
++ (ml->type == LKM_EXMODE ||
++ memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
++ int i;
++ mlog(ML_ERROR, "%s:%.*s: received bad "
++ "lvb! type=%d\n", dlm->name,
++ res->lockname.len,
++ res->lockname.name, ml->type);
++ printk("lockres lvb=[");
++ for (i=0; i<DLM_LVB_LEN; i++)
++ printk("%02x", res->lvb[i]);
+ printk("]\nmigrated lvb=[");
-+ dlm_print_lvb(mres->lvb);
-+ printk("]\n");
-+ dlm_print_one_lock_resource(res);
-+ BUG();
++ for (i=0; i<DLM_LVB_LEN; i++)
++ printk("%02x", mres->lvb[i]);
++ printk("]\n");
++ dlm_print_one_lock_resource(res);
++ BUG();
}
memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
}
@@ -138,11 +69,3 @@
/* NOTE:
* wrt lock queue ordering and recovery:
-@@ -1760,6 +1830,7 @@
- bad = 0;
- spin_lock(&res->spinlock);
- list_for_each_entry(lock, queue, list) {
-+#warning does this need be64_to_cpu conversion?
- if (lock->ml.cookie == ml->cookie) {
- u64 c = lock->ml.cookie;
- mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
Modified: branches/ocfs2-1.2-cert/patches/mar24-create-lock-handler.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/mar24-create-lock-handler.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/mar24-create-lock-handler.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: fs/ocfs2/dlm/dlmlock.c
===================================================================
---- fs/ocfs2/dlm/dlmlock.c.orig 2006-04-18 14:34:36.506882000 -0700
-+++ fs/ocfs2/dlm/dlmlock.c 2006-04-19 12:15:06.832054000 -0700
-@@ -280,6 +280,14 @@
+--- fs/ocfs2/dlm/dlmlock.c.orig 2006-04-19 14:46:30.873735000 -0700
++++ fs/ocfs2/dlm/dlmlock.c 2006-04-19 14:49:15.027927000 -0700
+@@ -280,6 +280,14 @@ static enum dlm_status dlm_send_remote_l
if (tmpret >= 0) {
// successfully sent and received
ret = status; // this is already a dlm_status
@@ -17,7 +17,7 @@
} else {
mlog_errno(tmpret);
if (dlm_is_host_down(tmpret)) {
-@@ -428,11 +436,16 @@
+@@ -428,11 +436,16 @@ int dlm_create_lock_handler(struct o2net
if (!dlm_grab(dlm))
return DLM_REJECTED;
Modified: branches/ocfs2-1.2-cert/patches/mastery-restart-recovery.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/mastery-restart-recovery.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/mastery-restart-recovery.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: fs/ocfs2/dlm/dlmmaster.c
===================================================================
---- fs/ocfs2/dlm/dlmmaster.c.orig 2006-04-19 12:15:01.917663000 -0700
-+++ fs/ocfs2/dlm/dlmmaster.c 2006-04-19 12:15:09.395085000 -0700
-@@ -864,6 +864,7 @@
+--- fs/ocfs2/dlm/dlmmaster.c.orig 2006-04-19 14:49:08.735859000 -0700
++++ fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:49:17.405067000 -0700
+@@ -864,6 +864,7 @@ lookup:
spin_unlock(&dlm->master_lock);
spin_unlock(&dlm->spinlock);
@@ -10,7 +10,7 @@
while (wait_on_recovery) {
/* any cluster changes that occurred after dropping the
* dlm spinlock would be detectable be a change on the mle,
-@@ -901,7 +902,6 @@
+@@ -901,7 +902,6 @@ lookup:
if (blocked)
goto wait;
@@ -18,36 +18,59 @@
ret = -EINVAL;
dlm_node_iter_init(mle->vote_map, &iter);
while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
-@@ -926,7 +926,8 @@
+@@ -926,6 +926,7 @@ wait:
/* keep going until the response map includes all nodes */
ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
if (ret < 0) {
-- mlog(0, "%s:%.*s: node map changed, redo the "
+ wait_on_recovery = 1;
-+ mlog(ML_NOTICE, "%s:%.*s: node map changed, redo the "
+ mlog(0, "%s:%.*s: node map changed, redo the "
"master request now, blocked=%d\n",
dlm->name, res->lockname.len,
- res->lockname.name, blocked);
-@@ -1207,7 +1208,60 @@
+@@ -1207,18 +1208,6 @@ static int dlm_restart_lock_mastery(stru
set_bit(node, mle->vote_map);
} else {
mlog(ML_ERROR, "node down! %d\n", node);
-+ if (blocked) {
-+ int lowest = find_next_bit(mle->maybe_map,
-+ O2NM_MAX_NODES, 0);
-+
-+ /* act like it was never there */
-+ clear_bit(node, mle->maybe_map);
+-
+- /* if the node wasn't involved in mastery skip it,
+- * but clear it out from the maps so that it will
+- * not affect mastery of this lockres */
+- clear_bit(node, mle->response_map);
+- clear_bit(node, mle->vote_map);
+- if (!test_bit(node, mle->maybe_map))
+- goto next;
+-
+- /* if we're already blocked on lock mastery, and the
+- * dead node wasn't the expected master, or there is
+- * another node in the maybe_map, keep waiting */
+ if (blocked) {
+ int lowest = find_next_bit(mle->maybe_map,
+ O2NM_MAX_NODES, 0);
+@@ -1226,54 +1215,53 @@ static int dlm_restart_lock_mastery(stru
+ /* act like it was never there */
+ clear_bit(node, mle->maybe_map);
+- if (node != lowest)
+- goto next;
+-
+- mlog(ML_ERROR, "expected master %u died while "
+- "this node was blocked waiting on it!\n",
+- node);
+- lowest = find_next_bit(mle->maybe_map,
+- O2NM_MAX_NODES,
+- lowest+1);
+- if (lowest < O2NM_MAX_NODES) {
+- mlog(0, "still blocked. waiting "
+- "on %u now\n", lowest);
+- goto next;
+ if (node == lowest) {
-+ mlog(ML_ERROR, "expected master %u died"
++ mlog(0, "expected master %u died"
+ " while this node was blocked "
+ "waiting on it!\n", node);
+ lowest = find_next_bit(mle->maybe_map,
+ O2NM_MAX_NODES,
+ lowest+1);
+ if (lowest < O2NM_MAX_NODES) {
-+ mlog(ML_NOTICE, "%s:%.*s:still "
++ mlog(0, "%s:%.*s:still "
+ "blocked. waiting on %u "
+ "now\n", dlm->name,
+ res->lockname.len,
@@ -62,7 +85,7 @@
+ * dlm_do_local_recovery_cleanup
+ * has already run, so the mle
+ * refcount is ok */
-+ mlog(ML_NOTICE, "%s:%.*s: no "
++ mlog(0, "%s:%.*s: no "
+ "longer blocking. try to "
+ "master this here\n",
+ dlm->name,
@@ -71,9 +94,38 @@
+ mle->type = DLM_MLE_MASTER;
+ mle->u.res = res;
+ }
-+ }
-+ }
-+
+ }
+-
+- /* mle is an MLE_BLOCK, but there is now
+- * nothing left to block on. we need to return
+- * all the way back out and try again with
+- * an MLE_MASTER. dlm_do_local_recovery_cleanup
+- * has already run, so the mle refcount is ok */
+- mlog(0, "no longer blocking. we can "
+- "try to master this here\n");
+- mle->type = DLM_MLE_MASTER;
+- memset(mle->maybe_map, 0,
+- sizeof(mle->maybe_map));
+- memset(mle->response_map, 0,
+- sizeof(mle->maybe_map));
+- memcpy(mle->vote_map, mle->node_map,
+- sizeof(mle->node_map));
+- mle->u.res = res;
+- set_bit(dlm->node_num, mle->maybe_map);
+-
+- ret = -EAGAIN;
+- goto next;
+ }
+
+- clear_bit(node, mle->maybe_map);
+- if (node > dlm->node_num)
+- goto next;
+-
+- mlog(0, "dead node in map!\n");
+- /* yuck. go back and re-contact all nodes
+- * in the vote_map, removing this node. */
+- memset(mle->response_map, 0,
+- sizeof(mle->response_map));
+ /* now blank out everything, as if we had never
+ * contacted anyone */
+ memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
@@ -84,37 +136,9 @@
+ /* put myself into the maybe map */
+ if (mle->type != DLM_MLE_BLOCK)
+ set_bit(dlm->node_num, mle->maybe_map);
-+
-+#if 0
- /* if the node wasn't involved in mastery skip it,
- * but clear it out from the maps so that it will
- * not affect mastery of this lockres */
-@@ -1215,7 +1269,6 @@
- clear_bit(node, mle->vote_map);
- if (!test_bit(node, mle->maybe_map))
- goto next;
--
- /* if we're already blocked on lock mastery, and the
- * dead node wasn't the expected master, or there is
- * another node in the maybe_map, keep waiting */
-@@ -1261,7 +1314,6 @@
- ret = -EAGAIN;
- goto next;
- }
--
- clear_bit(node, mle->maybe_map);
- if (node > dlm->node_num)
- goto next;
-@@ -1271,9 +1323,12 @@
- * in the vote_map, removing this node. */
- memset(mle->response_map, 0,
- sizeof(mle->response_map));
-+#endif
}
ret = -EAGAIN;
-+#if 0
- next:
-+#endif
+-next:
node = dlm_bitmap_diff_iter_next(&bdi, &sc);
}
return ret;
Modified: branches/ocfs2-1.2-cert/patches/move-dlm-work-to-thread.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/move-dlm-work-to-thread.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/move-dlm-work-to-thread.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmcommon.h
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmcommon.h 2006-04-19 12:15:42.733426000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmcommon.h 2006-04-19 12:15:48.622488000 -0700
-@@ -121,12 +121,13 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmcommon.h 2006-04-19 14:49:59.930437000 -0700
++++ cert3/fs/ocfs2/dlm/dlmcommon.h 2006-04-19 14:50:07.528877000 -0700
+@@ -121,12 +121,13 @@ struct dlm_ctxt
struct o2hb_callback_func dlm_hb_down;
struct task_struct *dlm_thread_task;
struct task_struct *dlm_reco_thread_task;
@@ -17,7 +17,7 @@
struct list_head work_list;
spinlock_t work_lock;
struct list_head dlm_domain_handlers;
-@@ -701,6 +702,10 @@
+@@ -701,6 +702,10 @@ void dlm_kick_recovery_thread(struct dlm
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node);
int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout);
@@ -30,9 +30,9 @@
void dlm_put(struct dlm_ctxt *dlm);
Index: cert3/fs/ocfs2/dlm/dlmdomain.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmdomain.c 2006-04-19 12:15:45.616457000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmdomain.c 2006-04-19 12:15:48.635488000 -0700
-@@ -290,6 +290,7 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmdomain.c 2006-04-19 14:50:04.717828000 -0700
++++ cert3/fs/ocfs2/dlm/dlmdomain.c 2006-04-19 14:50:07.546861000 -0700
+@@ -290,6 +290,7 @@ static void dlm_complete_dlm_shutdown(st
dlm_unregister_domain_handlers(dlm);
dlm_complete_thread(dlm);
dlm_complete_recovery_thread(dlm);
@@ -40,7 +40,7 @@
/* We've left the domain. Now we can take ourselves out of the
* list and allow the kref stuff to help us free the
-@@ -1136,6 +1137,12 @@
+@@ -1136,6 +1137,12 @@ static int dlm_join_domain(struct dlm_ct
goto bail;
}
@@ -53,7 +53,7 @@
do {
unsigned int backoff;
status = dlm_try_to_join_domain(dlm);
-@@ -1176,11 +1183,14 @@
+@@ -1176,11 +1183,14 @@ bail:
dlm_unregister_domain_handlers(dlm);
dlm_complete_thread(dlm);
dlm_complete_recovery_thread(dlm);
@@ -68,7 +68,7 @@
static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
u32 key)
{
-@@ -1241,8 +1251,10 @@
+@@ -1241,8 +1251,10 @@ static struct dlm_ctxt *dlm_alloc_ctxt(c
dlm->dlm_thread_task = NULL;
dlm->dlm_reco_thread_task = NULL;
@@ -79,7 +79,7 @@
init_waitqueue_head(&dlm->reco.event);
init_waitqueue_head(&dlm->ast_wq);
init_waitqueue_head(&dlm->migration_wq);
-@@ -1260,7 +1272,6 @@
+@@ -1260,7 +1272,6 @@ static struct dlm_ctxt *dlm_alloc_ctxt(c
spin_lock_init(&dlm->work_lock);
INIT_LIST_HEAD(&dlm->work_list);
@@ -89,9 +89,9 @@
dlm->dlm_state = DLM_CTXT_NEW;
Index: cert3/fs/ocfs2/dlm/dlmmaster.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 12:15:45.664457000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 12:15:48.661488000 -0700
-@@ -2024,7 +2024,7 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:50:04.769827000 -0700
++++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:50:07.569861000 -0700
+@@ -1957,7 +1957,7 @@ int dlm_dispatch_assert_master(struct dl
list_add_tail(&item->list, &dlm->work_list);
spin_unlock(&dlm->work_lock);
@@ -102,9 +102,9 @@
Index: cert3/fs/ocfs2/dlm/dlmrecovery.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 12:15:45.594457000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 12:15:48.682488000 -0700
-@@ -149,6 +149,72 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:50:04.678827000 -0700
++++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:50:07.611861000 -0700
+@@ -149,6 +149,72 @@ static inline void dlm_reset_recovery(st
spin_unlock(&dlm->spinlock);
}
@@ -177,7 +177,7 @@
/* Worker function used during recovery. */
void dlm_dispatch_work(void *data)
{
-@@ -157,15 +223,22 @@
+@@ -157,15 +223,22 @@ void dlm_dispatch_work(void *data)
struct list_head *iter, *iter2;
struct dlm_work_item *item;
dlm_workfunc_t *workfunc;
@@ -200,7 +200,7 @@
/* already have ref on dlm to avoid having
* it disappear. just double-check. */
-@@ -173,7 +246,10 @@
+@@ -173,7 +246,10 @@ void dlm_dispatch_work(void *data)
/* this is allowed to sleep and
* call network stuff */
@@ -211,7 +211,7 @@
dlm_put(dlm);
kfree(item);
-@@ -873,7 +949,7 @@
+@@ -873,7 +949,7 @@ int dlm_request_all_locks_handler(struct
spin_lock(&dlm->work_lock);
list_add_tail(&item->list, &dlm->work_list);
spin_unlock(&dlm->work_lock);
@@ -220,7 +220,7 @@
dlm_put(dlm);
return 0;
-@@ -1428,7 +1504,7 @@
+@@ -1428,7 +1504,7 @@ int dlm_mig_lockres_handler(struct o2net
spin_lock(&dlm->work_lock);
list_add_tail(&item->list, &dlm->work_list);
spin_unlock(&dlm->work_lock);
@@ -229,16 +229,3 @@
leave:
dlm_put(dlm);
-Index: cert3/fs/ocfs2/dlm/dlmthread.c
-===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmthread.c 2006-04-19 12:15:21.963171000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmthread.c 2006-04-19 12:15:48.692488000 -0700
-@@ -750,7 +750,7 @@
-
- /* yield and continue right away if there is more work to do */
- if (!n) {
-- yield();
-+ cond_resched();
- continue;
- }
-
Modified: branches/ocfs2-1.2-cert/patches/never-purge-master.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/never-purge-master.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/never-purge-master.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmthread.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmthread.c 2006-04-19 12:15:11.974383000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmthread.c 2006-04-19 12:15:21.963171000 -0700
-@@ -107,6 +107,21 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmthread.c 2006-04-19 14:49:21.739673000 -0700
++++ cert3/fs/ocfs2/dlm/dlmthread.c 2006-04-19 14:49:33.180131000 -0700
+@@ -107,6 +107,20 @@ void __dlm_lockres_calc_usage(struct dlm
assert_spin_locked(&res->spinlock);
if (__dlm_lockres_unused(res)){
@@ -10,8 +10,7 @@
+ if (res->owner == dlm->node_num)
+ {
+ if (!list_empty(&res->purge)) {
-+ mlog(ML_NOTICE,
-+ "we master %s:%.*s, but it is on "
++ mlog(0, "we master %s:%.*s, but it is on "
+ "the purge list. Removing\n",
+ dlm->name, res->lockname.len,
+ res->lockname.name);
@@ -26,9 +25,9 @@
res->lockname.len, res->lockname.name);
Index: cert3/fs/ocfs2/dlm/dlmlock.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmlock.c 2006-04-19 12:15:17.261501000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmlock.c 2006-04-19 12:15:21.973161000 -0700
-@@ -226,14 +226,18 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmlock.c 2006-04-19 14:49:28.722419000 -0700
++++ cert3/fs/ocfs2/dlm/dlmlock.c 2006-04-19 14:49:33.207124000 -0700
+@@ -226,14 +226,18 @@ static enum dlm_status dlmlock_remote(st
res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
lock->lock_pending = 0;
if (status != DLM_NORMAL) {
Modified: branches/ocfs2-1.2-cert/patches/ocfs2-1.2-no-idr-0.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/ocfs2-1.2-no-idr-0.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/ocfs2-1.2-no-idr-0.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: fs/ocfs2/cluster/net_proc.c
===================================================================
---- fs/ocfs2/cluster/net_proc.c.orig 2006-04-18 14:34:29.680134000 -0700
-+++ fs/ocfs2/cluster/net_proc.c 2006-04-19 12:15:32.233677000 -0700
-@@ -117,6 +117,7 @@
+--- fs/ocfs2/cluster/net_proc.c.orig 2006-04-19 14:45:54.172530000 -0700
++++ fs/ocfs2/cluster/net_proc.c 2006-04-19 14:49:50.924335000 -0700
+@@ -117,6 +117,7 @@ static int nst_seq_show(struct seq_file
" process name: %s\n"
" node: %u\n"
" sc: %p\n"
@@ -10,7 +10,7 @@
" message type: %u\n"
" message key: 0x%08x\n"
" sock acquiry: %lu.%lu\n"
-@@ -125,7 +126,8 @@
+@@ -125,7 +126,8 @@ static int nst_seq_show(struct seq_file
nst, (unsigned long)nst->st_task->pid,
(unsigned long)nst->st_task->tgid,
nst->st_task->comm, nst->st_node,
@@ -20,7 +20,7 @@
nst->st_sock_time.tv_sec, nst->st_sock_time.tv_usec,
nst->st_send_time.tv_sec, nst->st_send_time.tv_usec,
nst->st_status_time.tv_sec,
-@@ -253,6 +255,8 @@
+@@ -253,6 +255,8 @@ static void *sc_seq_next(struct seq_file
return sc; /* unused, just needs to be null when done */
}
@@ -29,7 +29,7 @@
static int sc_seq_show(struct seq_file *seq, void *v)
{
struct o2net_sock_container *sc, *dummy_sc = seq->private;
-@@ -285,11 +289,31 @@
+@@ -285,11 +289,31 @@ static int sc_seq_show(struct seq_file *
" krefs: %d\n"
" sock: %u.%u.%u.%u:%u -> %u.%u.%u.%u:%u\n"
" remote node: %s\n"
@@ -67,8 +67,8 @@
Index: fs/ocfs2/cluster/tcp.c
===================================================================
---- fs/ocfs2/cluster/tcp.c.orig 2006-04-18 14:34:29.687139000 -0700
-+++ fs/ocfs2/cluster/tcp.c 2006-04-19 12:15:32.248662000 -0700
+--- fs/ocfs2/cluster/tcp.c.orig 2006-04-19 14:45:54.331369000 -0700
++++ fs/ocfs2/cluster/tcp.c 2006-04-19 14:49:50.946335000 -0700
@@ -56,7 +56,6 @@
#include <linux/kernel.h>
#include <linux/jiffies.h>
@@ -77,7 +77,7 @@
#include <linux/kref.h>
#include <net/tcp.h>
-@@ -173,39 +172,18 @@
+@@ -173,39 +172,18 @@ static u8 o2net_num_from_nn(struct o2net
/* ------------------------------------------------------------ */
@@ -125,7 +125,7 @@
}
static void o2net_complete_nsw_locked(struct o2net_node *nn,
-@@ -219,31 +197,43 @@
+@@ -219,31 +197,43 @@ static void o2net_complete_nsw_locked(st
list_del_init(&nsw->ns_node_item);
nsw->ns_sys_status = sys_status;
nsw->ns_status = status;
@@ -183,7 +183,7 @@
}
static void o2net_complete_nodes_nsw(struct o2net_node *nn)
-@@ -951,11 +941,10 @@
+@@ -951,11 +941,10 @@ int o2net_send_message_vec(u32 msg_type,
vec[0].iov_base = msg;
memcpy(&vec[1], caller_vec, caller_veclen * sizeof(struct kvec));
@@ -197,7 +197,7 @@
do_gettimeofday(&nst.st_send_time);
/* finally, convert the message header to network byte-order
-@@ -989,7 +978,7 @@
+@@ -989,7 +978,7 @@ out:
kfree(vec);
if (msg)
kfree(msg);
@@ -206,7 +206,7 @@
return ret;
}
EXPORT_SYMBOL_GPL(o2net_send_message_vec);
-@@ -1045,10 +1034,9 @@
+@@ -1045,10 +1034,9 @@ static int o2net_process_message(struct
switch(be16_to_cpu(hdr->magic)) {
case O2NET_MSG_STATUS_MAGIC:
/* special type for returning message status */
@@ -220,7 +220,7 @@
goto out;
case O2NET_MSG_KEEP_REQ_MAGIC:
o2net_sendpage(sc, o2net_keep_resp,
-@@ -1865,7 +1853,6 @@
+@@ -1865,7 +1853,6 @@ int o2net_init(void)
/* until we see hb from a node we'll return einval */
nn->nn_persistent_error = -ENOTCONN;
init_waitqueue_head(&nn->nn_sc_wq);
@@ -230,9 +230,9 @@
Index: fs/ocfs2/cluster/tcp_internal.h
===================================================================
---- fs/ocfs2/cluster/tcp_internal.h.orig 2006-04-18 14:34:29.988136000 -0700
-+++ fs/ocfs2/cluster/tcp_internal.h 2006-04-19 12:15:32.256656000 -0700
-@@ -62,7 +62,7 @@
+--- fs/ocfs2/cluster/tcp_internal.h.orig 2006-04-19 14:45:54.348354000 -0700
++++ fs/ocfs2/cluster/tcp_internal.h 2006-04-19 14:49:50.972335000 -0700
+@@ -62,7 +62,7 @@ struct o2net_node {
* or fails or when an accepted socket is attached. */
wait_queue_head_t nn_sc_wq;
@@ -241,7 +241,7 @@
struct list_head nn_status_list;
/* connects are attempted from when heartbeat comes up until either hb
-@@ -160,7 +160,7 @@
+@@ -160,7 +160,7 @@ enum o2net_system_error {
struct o2net_status_wait {
enum o2net_system_error ns_sys_status;
s32 ns_status;
@@ -250,7 +250,7 @@
wait_queue_head_t ns_wq;
struct list_head ns_node_item;
};
-@@ -170,6 +170,7 @@
+@@ -170,6 +170,7 @@ struct o2net_send_tracking {
struct list_head st_net_proc_item;
struct task_struct *st_task;
struct o2net_sock_container *st_sc;
Modified: branches/ocfs2-1.2-cert/patches/ocfs2-extend_tracing2.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/ocfs2-extend_tracing2.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/ocfs2-extend_tracing2.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: fs/ocfs2/file.c
===================================================================
---- fs/ocfs2/file.c.orig 2006-04-17 15:59:26.569849000 -0700
-+++ fs/ocfs2/file.c 2006-04-18 14:00:37.937167000 -0700
-@@ -733,7 +733,8 @@
+--- fs/ocfs2/file.c.orig 2006-04-19 14:46:40.821517000 -0700
++++ fs/ocfs2/file.c 2006-04-19 14:48:41.096875000 -0700
+@@ -733,7 +733,8 @@ leave:
int ocfs2_extend_file(struct ocfs2_super *osb,
struct inode *inode,
u64 new_i_size,
@@ -12,7 +12,7 @@
{
int status = 0;
int restart_func = 0;
-@@ -747,6 +748,8 @@
+@@ -747,6 +748,8 @@ int ocfs2_extend_file(struct ocfs2_super
struct ocfs2_alloc_context *data_ac = NULL;
struct ocfs2_alloc_context *meta_ac = NULL;
enum ocfs2_alloc_restarted why;
@@ -21,7 +21,7 @@
mlog_entry("(Inode %"MLFu64" new_i_size=%"MLFu64")\n",
OCFS2_I(inode)->ip_blkno, new_i_size);
-@@ -758,6 +761,8 @@
+@@ -758,6 +761,8 @@ int ocfs2_extend_file(struct ocfs2_super
goto leave;
restart_all:
@@ -30,7 +30,7 @@
handle = ocfs2_alloc_handle(osb);
if (handle == NULL) {
status = -ENOMEM;
-@@ -778,18 +783,23 @@
+@@ -778,18 +783,23 @@ restart_all:
status = -EIO;
goto leave;
}
@@ -66,7 +66,7 @@
if (i_size_read(inode) == new_i_size)
goto leave;
-@@ -852,6 +862,7 @@
+@@ -852,6 +862,7 @@ do_start_trans:
}
restarted_transaction:
@@ -74,7 +74,7 @@
/* reserve a write to the file entry early on - that we if we
* run out of credits in the allocation path, we can still
* update i_size. */
-@@ -1039,7 +1050,7 @@
+@@ -1039,7 +1050,7 @@ int ocfs2_setattr(struct dentry *dentry,
status = ocfs2_truncate_file(osb, newsize, inode);
else
status = ocfs2_extend_file(osb, inode, newsize,
@@ -85,9 +85,9 @@
mlog_errno(status);
Index: fs/ocfs2/file.h
===================================================================
---- fs/ocfs2/file.h.orig 2006-04-17 15:59:26.575850000 -0700
-+++ fs/ocfs2/file.h 2006-04-18 14:00:37.964140000 -0700
-@@ -50,7 +50,8 @@
+--- fs/ocfs2/file.h.orig 2006-04-19 14:46:41.012515000 -0700
++++ fs/ocfs2/file.h 2006-04-19 14:48:41.121875000 -0700
+@@ -50,7 +50,8 @@ int ocfs2_sync_inode(struct inode *inode
int ocfs2_extend_file(struct ocfs2_super *osb,
struct inode *inode,
u64 new_i_size,
@@ -99,9 +99,9 @@
struct inode *inode,
Index: fs/ocfs2/mmap.c
===================================================================
---- fs/ocfs2/mmap.c.orig 2006-04-17 15:59:26.742849000 -0700
-+++ fs/ocfs2/mmap.c 2006-04-18 14:00:37.977127000 -0700
-@@ -529,6 +529,7 @@
+--- fs/ocfs2/mmap.c.orig 2006-04-19 14:46:41.027515000 -0700
++++ fs/ocfs2/mmap.c 2006-04-19 14:48:41.133875000 -0700
+@@ -529,6 +529,7 @@ ssize_t ocfs2_write_lock_maybe_extend(st
int level = filp->f_flags & O_APPEND;
loff_t saved_ppos;
u64 bytes_added = 0;
@@ -109,7 +109,7 @@
osb = OCFS2_SB(inode->i_sb);
-@@ -675,11 +676,12 @@
+@@ -675,11 +676,12 @@ lock:
"i_size=%lld, need=%"MLFu64"\n", i_size_read(inode),
info->wl_newsize);
Modified: branches/ocfs2-1.2-cert/patches/ocfs2-journal_start_stop_msgs.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/ocfs2-journal_start_stop_msgs.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/ocfs2-journal_start_stop_msgs.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: fs/ocfs2/journal.c
===================================================================
---- fs/ocfs2/journal.c.orig 2006-04-17 15:59:26.263850000 -0700
-+++ fs/ocfs2/journal.c 2006-04-18 14:00:45.776773000 -0700
-@@ -1029,6 +1029,8 @@
+--- fs/ocfs2/journal.c.orig 2006-04-19 14:46:40.554517000 -0700
++++ fs/ocfs2/journal.c 2006-04-19 14:48:43.374023000 -0700
+@@ -1029,6 +1029,8 @@ static int __ocfs2_recovery_thread(void
}
restart:
@@ -11,7 +11,7 @@
status = ocfs2_super_lock(osb, 1);
if (status < 0) {
mlog_errno(status);
-@@ -1043,6 +1045,7 @@
+@@ -1043,6 +1045,7 @@ restart:
break;
}
@@ -19,7 +19,7 @@
status = ocfs2_recover_node(osb, node_num);
if (status < 0) {
mlog(ML_ERROR,
-@@ -1052,11 +1055,13 @@
+@@ -1052,11 +1055,13 @@ restart:
mlog(ML_ERROR, "Volume requires unmount.\n");
continue;
}
Modified: branches/ocfs2-1.2-cert/patches/ocfs2-reco_nofs.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/ocfs2-reco_nofs.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/ocfs2-reco_nofs.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: fs/ocfs2/journal.c
===================================================================
---- fs/ocfs2/journal.c.orig 2006-04-18 14:00:45.776773000 -0700
-+++ fs/ocfs2/journal.c 2006-04-18 14:00:53.905090000 -0700
-@@ -858,9 +858,11 @@
+--- fs/ocfs2/journal.c.orig 2006-04-19 14:48:43.374023000 -0700
++++ fs/ocfs2/journal.c 2006-04-19 14:48:47.016943000 -0700
+@@ -858,9 +858,11 @@ static int ocfs2_force_read_journal(stru
if (p_blocks > CONCURRENT_JOURNAL_FILL)
p_blocks = CONCURRENT_JOURNAL_FILL;
@@ -17,9 +17,9 @@
goto bail;
Index: fs/ocfs2/uptodate.c
===================================================================
---- fs/ocfs2/uptodate.c.orig 2006-04-17 15:59:25.782171000 -0700
-+++ fs/ocfs2/uptodate.c 2006-04-18 14:00:53.923072000 -0700
-@@ -335,7 +335,7 @@
+--- fs/ocfs2/uptodate.c.orig 2006-04-19 14:46:39.889837000 -0700
++++ fs/ocfs2/uptodate.c 2006-04-19 14:48:47.028943000 -0700
+@@ -335,7 +335,7 @@ static void __ocfs2_set_buffer_uptodate(
mlog(0, "Inode %"MLFu64", block %llu, expand = %d\n",
oi->ip_blkno, (unsigned long long) block, expand_tree);
@@ -28,7 +28,7 @@
if (!new) {
mlog_errno(-ENOMEM);
return;
-@@ -347,7 +347,7 @@
+@@ -347,7 +347,7 @@ static void __ocfs2_set_buffer_uptodate(
* has no way of tracking that. */
for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++) {
tree[i] = kmem_cache_alloc(ocfs2_uptodate_cachep,
Modified: branches/ocfs2-1.2-cert/patches/ocfs2_dlm-do_lvb_puts_inline2.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/ocfs2_dlm-do_lvb_puts_inline2.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/ocfs2_dlm-do_lvb_puts_inline2.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: fs/ocfs2/dlm/dlmconvert.c
===================================================================
---- fs/ocfs2/dlm/dlmconvert.c.orig 2006-04-18 14:34:41.613270000 -0700
-+++ fs/ocfs2/dlm/dlmconvert.c 2006-04-18 14:43:18.353118000 -0700
-@@ -214,6 +214,9 @@
+--- fs/ocfs2/dlm/dlmconvert.c.orig 2006-04-19 14:46:39.012159000 -0700
++++ fs/ocfs2/dlm/dlmconvert.c 2006-04-19 14:48:49.395074000 -0700
+@@ -214,6 +214,9 @@ grant:
if (lock->ml.node == dlm->node_num)
mlog(0, "doing in-place convert for nonlocal lock\n");
lock->ml.type = type;
@@ -14,9 +14,9 @@
goto unlock_exit;
Index: fs/ocfs2/dlm/dlmast.c
===================================================================
---- fs/ocfs2/dlm/dlmast.c.orig 2006-04-18 14:34:41.918273000 -0700
-+++ fs/ocfs2/dlm/dlmast.c 2006-04-18 14:44:08.809696000 -0700
-@@ -197,12 +197,12 @@
+--- fs/ocfs2/dlm/dlmast.c.orig 2006-04-19 14:46:39.106135000 -0700
++++ fs/ocfs2/dlm/dlmast.c 2006-04-19 14:48:49.409060000 -0700
+@@ -197,12 +197,12 @@ static void dlm_update_lvb(struct dlm_ct
lock->ml.node == dlm->node_num ? "master" :
"remote");
memcpy(lksb->lvb, res->lvb, DLM_LVB_LEN);
Modified: branches/ocfs2-1.2-cert/patches/ocfs2_heartbeat-better_I_O_error_handling.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/ocfs2_heartbeat-better_I_O_error_handling.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/ocfs2_heartbeat-better_I_O_error_handling.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -3,9 +3,9 @@
Index: fs/ocfs2/cluster/heartbeat.c
===================================================================
---- fs/ocfs2/cluster/heartbeat.c.orig 2006-04-18 14:34:33.022171000 -0700
-+++ fs/ocfs2/cluster/heartbeat.c 2006-04-19 12:15:19.641855000 -0700
-@@ -165,6 +165,7 @@
+--- fs/ocfs2/cluster/heartbeat.c.orig 2006-04-19 14:46:17.274036000 -0700
++++ fs/ocfs2/cluster/heartbeat.c 2006-04-19 14:49:30.912775000 -0700
+@@ -165,6 +165,7 @@ static spinlock_t o2hb_blocker_lock = SP
struct o2hb_bio_wait_ctxt {
atomic_t wc_num_reqs;
struct completion wc_io_complete;
@@ -13,7 +13,7 @@
};
static unsigned int o2hb_elapsed_msecs(struct timeval *start,
-@@ -293,6 +294,7 @@
+@@ -293,6 +294,7 @@ static inline void o2hb_bio_wait_init(st
{
atomic_set(&wc->wc_num_reqs, num_ios);
init_completion(&wc->wc_io_complete);
@@ -21,7 +21,7 @@
}
/* Used in error paths too */
-@@ -325,8 +327,10 @@
+@@ -325,8 +327,10 @@ static int o2hb_bio_end_io(struct bio *b
{
struct o2hb_bio_wait_ctxt *wc = bio->bi_private;
@@ -33,7 +33,7 @@
if (bio->bi_size)
return 1;
-@@ -515,6 +519,8 @@
+@@ -515,6 +519,8 @@ static int o2hb_read_slots(struct o2hb_r
bail_and_wait:
o2hb_mlog_blocking(reg, &start, "waiting for read completion");
o2hb_wait_on_io(reg, &wc);
@@ -42,7 +42,7 @@
o2hb_mlog_blocking_done(reg, &start);
if (bios) {
-@@ -917,7 +923,7 @@
+@@ -917,7 +923,7 @@ static int o2hb_highest_node(unsigned lo
return highest;
}
@@ -51,7 +51,7 @@
{
int i, ret, highest_node, change = 0;
unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)];
-@@ -925,13 +931,17 @@
+@@ -925,13 +931,17 @@ static void o2hb_do_disk_heartbeat(struc
struct o2hb_bio_wait_ctxt write_wc;
struct timeval start;
@@ -72,7 +72,7 @@
}
/* No sense in reading the slots of nodes that don't exist
-@@ -941,7 +951,7 @@
+@@ -941,7 +951,7 @@ static void o2hb_do_disk_heartbeat(struc
ret = o2hb_read_slots(reg, highest_node + 1);
if (ret < 0) {
mlog_errno(ret);
@@ -81,7 +81,7 @@
}
/* With an up to date view of the slots, we can check that no
-@@ -959,7 +969,7 @@
+@@ -959,7 +969,7 @@ static void o2hb_do_disk_heartbeat(struc
ret = o2hb_issue_node_write(reg, &write_bio, &write_wc);
if (ret < 0) {
mlog_errno(ret);
@@ -90,7 +90,7 @@
}
o2hb_mlog_blocking(reg, &start, "checking slots");
-@@ -979,6 +989,15 @@
+@@ -979,6 +989,15 @@ static void o2hb_do_disk_heartbeat(struc
o2hb_wait_on_io(reg, &write_wc);
o2hb_mlog_blocking_done(reg, &start);
bio_put(write_bio);
@@ -106,7 +106,7 @@
o2hb_arm_write_timeout(reg);
/* let the person who launched us know when things are steady */
-@@ -986,6 +1005,8 @@
+@@ -986,6 +1005,8 @@ static void o2hb_do_disk_heartbeat(struc
if (atomic_dec_and_test(®->hr_steady_iterations))
wake_up(&o2hb_steady_queue);
}
@@ -115,7 +115,7 @@
}
/* Subtract b from a, storing the result in a. a *must* have a larger
-@@ -1045,7 +1066,10 @@
+@@ -1045,7 +1066,10 @@ static int o2hb_thread(void *data)
* likely to time itself out. */
do_gettimeofday(&before_hb);
Modified: branches/ocfs2-1.2-cert/patches/reassert-vs-migration.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/reassert-vs-migration.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/reassert-vs-migration.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmmaster.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 12:15:14.671470000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 12:15:24.584240000 -0700
-@@ -2062,6 +2062,24 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:49:26.486094000 -0700
++++ cert3/fs/ocfs2/dlm/dlmmaster.c 2006-04-19 14:49:37.599521000 -0700
+@@ -1995,6 +1995,23 @@ void dlm_assert_master_worker(struct dlm
}
}
@@ -14,8 +14,7 @@
+ */
+ spin_lock(&res->spinlock);
+ if (res->state & DLM_LOCK_RES_MIGRATING) {
-+ mlog(ML_NOTICE,
-+ "Someone asked us to assert mastery, but we're "
++ mlog(0, "Someone asked us to assert mastery, but we're "
+ "in the middle of migration. Skipping assert, "
+ "the new master will handle that.\n");
+ spin_unlock(&res->spinlock);
@@ -27,7 +26,7 @@
/* this call now finishes out the nodemap
* even if one or more nodes die */
mlog(0, "worker about to master %.*s here, this=%u\n",
-@@ -2074,6 +2092,10 @@
+@@ -2007,6 +2024,10 @@ void dlm_assert_master_worker(struct dlm
mlog_errno(ret);
}
Added: branches/ocfs2-1.2-cert/patches/remote-lock-during-reco-msgs.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/remote-lock-during-reco-msgs.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/remote-lock-during-reco-msgs.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -0,0 +1,26 @@
+Index: cert3/fs/ocfs2/dlm/dlmlock.c
+===================================================================
+--- cert3.orig/fs/ocfs2/dlm/dlmlock.c 2006-04-19 14:49:41.995233000 -0700
++++ cert3/fs/ocfs2/dlm/dlmlock.c 2006-04-19 14:49:44.191589000 -0700
+@@ -232,7 +232,7 @@ static enum dlm_status dlmlock_remote(st
+ /* recovery lock was mastered by dead node.
+ * we need to have calc_usage shoot down this
+ * lockres and completely remaster it. */
+- mlog(0, "%s: recovery lock was owned by "
++ mlog(ML_NOTICE, "%s: recovery lock was owned by "
+ "dead node %u, remaster it now.\n",
+ dlm->name, res->owner);
+ } else if (status != DLM_NOTQUEUED) {
+Index: cert3/fs/ocfs2/dlm/dlmrecovery.c
+===================================================================
+--- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:49:42.033233000 -0700
++++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:49:44.217579000 -0700
+@@ -2425,7 +2425,7 @@ again:
+ dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
+ status = -EEXIST;
+ } else if (ret == DLM_RECOVERING) {
+- mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
++ mlog(ML_NOTICE, "dlm=%s dlmlock says master node died (this=%u)\n",
+ dlm->name, dlm->node_num);
+ goto again;
+ } else {
Modified: branches/ocfs2-1.2-cert/patches/remove-bad-spin-unlock.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/remove-bad-spin-unlock.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/remove-bad-spin-unlock.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmrecovery.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 12:15:29.550625000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 12:15:37.440041000 -0700
-@@ -648,7 +648,6 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:49:48.801979000 -0700
++++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:49:55.629726000 -0700
+@@ -648,7 +648,6 @@ static int dlm_remaster_locks(struct dlm
"requesting recovery info for "
"node %u\n", ndata->node_num,
dead_node);
Modified: branches/ocfs2-1.2-cert/patches/series
===================================================================
--- branches/ocfs2-1.2-cert/patches/series 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/series 2006-04-19 22:02:49 UTC (rev 2853)
@@ -29,25 +29,38 @@
fix-purge-lockres.patch -p0
dlm-eloop.patch -p0
lvb-recovery-fix.patch -p0
+dlm-lvb-debug-pretty-print.patch
mar24-create-lock-handler.patch -p0
mastery-restart-recovery.patch -p0
+dlm-restart-mastery-debug-msgs.patch
hold-dirty-ref.patch
fix-recovery-spin.patch
+dlm-more-time-needed-for-hb-detection.patch
fix-dlmlock_remote.patch
ocfs2_heartbeat-better_I_O_error_handling.patch -p0
never-purge-master.patch
+dlm-calc-usage-warning.patch
reassert-vs-migration.patch
+dlm-cancel-assert-during-migrate-notice.patch
fix-remote-lock-during-reco.patch
+remote-lock-during-reco-msgs.patch
fix-death-during-recovery.patch
+enomem-on-other-reco-node-warning.patch
ocfs2-1.2-no-idr-0.patch -p0
dlm-mlog_to_printk
remove-bad-spin-unlock.patch
continue-finalize-reco.patch
fix-wait-in-mastery.patch
+dlm-wait-for-recovery-msgs.patch
dlm-replace_gfp_kernel_with_nofs
move-dlm-work-to-thread.patch
+change-yield-to-cond_resched.patch
hb-add_tracking_around_configured_nodes
jrnl-change_gfp_kernel_to_nofs
-debug-bad-recovery.patch
+dlm-check-recovery-flag-before-convert.patch
+bad-lock-during-convert-debug-msg.patch
+fix-wait-for-recovery.patch
+dlm-quiet-noisy-mastery-reco-msgs.patch
dlm-new_proc_entry
dlm-fix_dlm_lock_reco_handling.patch
+fix-trailing-whitespace-1.patch
Modified: branches/ocfs2-1.2-cert/patches/two-stage-finalize.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/two-stage-finalize.patch 2006-04-19 22:00:35 UTC (rev 2852)
+++ branches/ocfs2-1.2-cert/patches/two-stage-finalize.patch 2006-04-19 22:02:49 UTC (rev 2853)
@@ -1,8 +1,8 @@
Index: cert3/fs/ocfs2/dlm/dlmcommon.h
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmcommon.h 2006-04-19 11:25:06.000000000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmcommon.h 2006-04-19 11:31:21.585283000 -0700
-@@ -61,7 +61,8 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmcommon.h 2006-04-19 14:48:55.700045000 -0700
++++ cert3/fs/ocfs2/dlm/dlmcommon.h 2006-04-19 14:49:00.178756000 -0700
+@@ -61,7 +61,8 @@ static inline int dlm_is_recovery_lock(c
return 0;
}
@@ -12,7 +12,7 @@
struct dlm_recovery_ctxt
{
-@@ -618,7 +619,8 @@
+@@ -618,7 +619,8 @@ struct dlm_finalize_reco
{
u8 node_idx;
u8 dead_node;
@@ -24,9 +24,9 @@
Index: cert3/fs/ocfs2/dlm/dlmrecovery.c
===================================================================
---- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 11:25:07.000000000 -0700
-+++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 12:10:44.996421000 -0700
-@@ -134,12 +134,18 @@
+--- cert3.orig/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:48:58.026401000 -0700
++++ cert3/fs/ocfs2/dlm/dlmrecovery.c 2006-04-19 14:49:00.203731000 -0700
+@@ -134,12 +134,18 @@ static inline void dlm_set_reco_master(s
dlm->reco.new_master = master;
}
@@ -47,7 +47,7 @@
spin_unlock(&dlm->spinlock);
}
-@@ -380,7 +386,7 @@
+@@ -380,7 +386,7 @@ void dlm_wait_for_recovery(struct dlm_ct
dlm->name, dlm->dlm_reco_thread_task->pid,
dlm->reco.state, dlm->reco.new_master,
dlm->reco.dead_node);
@@ -56,7 +56,7 @@
}
while (1) {
-@@ -393,7 +399,7 @@
+@@ -393,7 +399,7 @@ void dlm_wait_for_recovery(struct dlm_ct
dlm->name, dlm->dlm_reco_thread_task->pid,
dlm->reco.state, dlm->reco.new_master,
dlm->reco.dead_node);
@@ -65,7 +65,7 @@
}
// wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
}
-@@ -2098,6 +2104,20 @@
+@@ -2098,6 +2104,20 @@ void __dlm_hb_node_down(struct dlm_ctxt
{
assert_spin_locked(&dlm->spinlock);
@@ -86,7 +86,7 @@
/* check to see if the node is already considered dead */
if (!test_bit(idx, dlm->live_nodes_map)) {
mlog(0, "for domain %s, node %d is already dead. "
-@@ -2404,6 +2424,13 @@
+@@ -2404,6 +2424,13 @@ retry:
* another ENOMEM */
msleep(100);
goto retry;
@@ -100,7 +100,7 @@
}
}
-@@ -2419,6 +2446,17 @@
+@@ -2419,6 +2446,17 @@ int dlm_begin_reco_handler(struct o2net_
if (!dlm_grab(dlm))
return 0;
@@ -118,7 +118,7 @@
mlog(ML_NOTICE, "%s: node %u wants to recover node %u (%u:%u)\n",
dlm->name, br->node_idx, br->dead_node,
dlm->reco.dead_node, dlm->reco.new_master);
-@@ -2472,6 +2510,7 @@
+@@ -2472,6 +2510,7 @@ int dlm_begin_reco_handler(struct o2net_
return 0;
}
@@ -126,7 +126,7 @@
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
{
int ret = 0;
-@@ -2479,25 +2518,31 @@
+@@ -2479,25 +2518,31 @@ static int dlm_send_finalize_reco_messag
struct dlm_node_iter iter;
int nodenum;
int status;
@@ -161,7 +161,7 @@
if (dlm_is_host_down(ret)) {
/* this has no effect on this recovery
* session, so set the status to zero to
-@@ -2506,12 +2551,15 @@
+@@ -2506,12 +2551,15 @@ static int dlm_send_finalize_reco_messag
"node finished recovery.\n", nodenum);
ret = 0;
}
@@ -180,7 +180,7 @@
return ret;
}
-@@ -2520,14 +2568,18 @@
+@@ -2520,14 +2568,18 @@ int dlm_finalize_reco_handler(struct o2n
{
struct dlm_ctxt *dlm = data;
struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
@@ -202,7 +202,7 @@
spin_lock(&dlm->spinlock);
-@@ -2544,13 +2596,38 @@
+@@ -2544,13 +2596,38 @@ int dlm_finalize_reco_handler(struct o2n
BUG();
}
More information about the Ocfs2-commits
mailing list