[Ocfs2-commits] zab commits r2451 - trunk/fs/ocfs2/cluster

svn-commits at oss.oracle.com svn-commits at oss.oracle.com
Fri Jul 1 11:49:22 CDT 2005


Author: zab
Signed-off-by: mfasheh
Date: 2005-07-01 11:49:20 -0500 (Fri, 01 Jul 2005)
New Revision: 2451

Added:
   trunk/fs/ocfs2/cluster/quorum.c
   trunk/fs/ocfs2/cluster/quorum.h
Modified:
   trunk/fs/ocfs2/cluster/Makefile
   trunk/fs/ocfs2/cluster/tcp.c
   trunk/fs/ocfs2/cluster/tcp_internal.h
Log:
Move the quorum decision making off into its own file and comment
the heck out of it.  In the process close some bad races that could trigger
fencing in perfectly benign circumstances.

Signed-off-by: mfasheh


Modified: trunk/fs/ocfs2/cluster/Makefile
===================================================================
--- trunk/fs/ocfs2/cluster/Makefile	2005-06-30 01:51:32 UTC (rev 2450)
+++ trunk/fs/ocfs2/cluster/Makefile	2005-07-01 16:49:20 UTC (rev 2451)
@@ -28,6 +28,7 @@
 	masklog.c		\
 	net_proc.c		\
 	nodemanager.c		\
+	quorum.c		\
 	tcp.c			\
 	ver.c
 
@@ -37,6 +38,7 @@
 	nodemanager.h		\
 	ocfs2_heartbeat.h	\
 	ocfs2_nodemanager.h	\
+	quorum.h		\
 	tcp.h			\
 	tcp_internal.h		\
 	ver.h

Added: trunk/fs/ocfs2/cluster/quorum.c
===================================================================
--- trunk/fs/ocfs2/cluster/quorum.c	2005-06-30 01:51:32 UTC (rev 2450)
+++ trunk/fs/ocfs2/cluster/quorum.c	2005-07-01 16:49:20 UTC (rev 2451)
@@ -0,0 +1,294 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ *
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * Copyright (C) 2005 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ * 
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+/* This quorum hack is only here until we transition to some more rational
+ * approach that is driven from userspace.  Honest.  No foolin'.
+ *
+ * Imagine two nodes lose network connectivity to each other but they're still
+ * up and operating in every other way.  Presumably a network timeout indicates
+ * that a node is broken and should be recovered.  They can't both recover each
+ * other and both carry on without serialising their access to the file system.
+ * They need to decide who is authoritative.  Now extend that problem to
+ * arbitrary groups of nodes losing connectivity between each other.
+ *
+ * So we declare that a node which has given up on connecting to a majority
+ * of nodes who are still heartbeating will fence itself.
+ *
+ * There are huge opportunities for races here.  After we give up on a node's
+ * connection we need to wait long enough to give heartbeat an opportunity
+ * to declare the node as truly dead.  We also need to be careful with the
+ * race between when we see a node start heartbeating and when we connect
+ * to it.
+ *
+ * So nodes that are in this transition put a hold on the quorum decision
+ * with a counter.  As they fall out of this transition they drop the count
+ * and if they're the last, they fire off the decision.
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "heartbeat.h"
+#include "nodemanager.h"
+#define MLOG_MASK_PREFIX ML_QUORUM
+#include "masklog.h"
+
+static struct o2quo_state {
+	spinlock_t		qs_lock;
+	struct work_struct	qs_work;
+	int			qs_pending;
+	int			qs_heartbeating;
+	unsigned long		qs_hb_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
+	int			qs_connected;
+	unsigned long		qs_conn_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
+	int			qs_holds;
+	unsigned long		qs_hold_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
+} o2quo_state;
+
+static void o2quo_make_decision(void *arg)
+{
+	int quorum;
+	int lowest_hb, lowest_reachable = 0, fence = 0;
+	struct o2quo_state *qs = &o2quo_state;
+
+	spin_lock(&qs->qs_lock);
+
+	lowest_hb = find_first_bit(qs->qs_hb_bm, O2NM_MAX_NODES);
+	if (lowest_hb != O2NM_MAX_NODES)
+		lowest_reachable = test_bit(lowest_hb, qs->qs_conn_bm);
+
+	mlog(0, "heartbeating: %d, connected: %d, "
+	     "lowest: %d (%sreachable)\n", qs->qs_heartbeating,
+	     qs->qs_connected, lowest_hb, lowest_reachable ? "" : "un");
+
+	if (!test_bit(o2nm_this_node(), qs->qs_hb_bm) ||
+	    qs->qs_heartbeating == 1)
+		goto out;
+
+	if (qs->qs_heartbeating & 1) {
+		/* the odd numbered cluster case is straightforward --
+		 * if we can't talk to the majority we're hosed */
+		quorum = (qs->qs_heartbeating + 1)/2;
+		if (qs->qs_connected < quorum) {
+			mlog(ML_ERROR, "fencing this node because it is "
+			     "only connected to %u nodes and %u is needed "
+			     "to make a quorum out of %u heartbeating nodes\n",
+			     qs->qs_connected, quorum, 
+			     qs->qs_heartbeating);
+			fence = 1;
+		}
+	} else {
+		/* the even numbered cluster adds the possibility of each half
+		 * of the cluster being able to talk amongst themselves.. in
+		 * that case we're hosed if we can't talk to the group that has
+		 * the lowest numbered node */
+		quorum = qs->qs_heartbeating / 2;
+		if (qs->qs_connected < quorum) {
+			mlog(ML_ERROR, "fencing this node because it is "
+			     "only connected to %u nodes and %u is needed "
+			     "to make a quorum out of %u heartbeating nodes\n",
+			     qs->qs_connected, quorum, 
+			     qs->qs_heartbeating);
+		}
+		else if ((qs->qs_connected == quorum) && 
+			 !lowest_reachable) {
+			mlog(ML_ERROR, "fencing this node because it is "
+			     "connected to a half-quorum of %u out of %u "
+			     "nodes which doesn't include the lowest active "
+			     "node %u\n", quorum, qs->qs_heartbeating,
+			     lowest_hb);
+			fence = 1;
+		}
+	}
+	
+out:
+	spin_unlock(&qs->qs_lock);
+	/* this is horribly heavy-handed.  It should instead flip the file
+	 * system RO and call some userspace script */
+	if (fence) {
+		/* panic spins with interrupts enabled.  with preempt
+		 * threads can still schedule, etc, etc */
+		o2hb_stop_all_regions();
+		panic("ocfs2 is very sorry to be fencing this system by "
+		      "panicing\n");
+	}
+}
+
+static void o2quo_set_hold(struct o2quo_state *qs, u8 node)
+{
+	assert_spin_locked(&qs->qs_lock);
+
+	if (!test_and_set_bit(node, qs->qs_hold_bm)) {
+		qs->qs_holds++;
+		mlog_bug_on_msg(qs->qs_holds == O2NM_MAX_NODES,
+			        "node %u\n", node);
+		mlog(0, "node %u, %d total\n", node, qs->qs_holds);
+	}
+}
+
+static void o2quo_clear_hold(struct o2quo_state *qs, u8 node)
+{
+	assert_spin_locked(&qs->qs_lock);
+
+	if (test_and_clear_bit(node, qs->qs_hold_bm)) {
+		mlog(0, "node %u, %d total\n", node, qs->qs_holds - 1);
+		if (--qs->qs_holds == 0) {
+			if (qs->qs_pending) {
+				qs->qs_pending = 0;
+				schedule_work(&qs->qs_work);
+			}
+		}
+		mlog_bug_on_msg(qs->qs_holds < 0, "node %u, holds %d\n",
+				node, qs->qs_holds);
+	}
+}
+
+/* as a node comes up we delay the quorum decision until we know the fate of
+ * the connection.  the hold will be dropped in conn_up or hb_down.  it might be
+ * perpetuated by conn_err until hb_down.  if we already have a conn, we might
+ * be dropping a hold that conn_up got. */
+void o2quo_hb_up(u8 node)
+{
+	struct o2quo_state *qs = &o2quo_state;
+
+	spin_lock(&qs->qs_lock);
+
+	qs->qs_heartbeating++;
+	mlog_bug_on_msg(qs->qs_heartbeating == O2NM_MAX_NODES,
+		        "node %u\n", node);
+	mlog_bug_on_msg(test_bit(node, qs->qs_hb_bm), "node %u\n", node);
+	set_bit(node, qs->qs_hb_bm);
+
+	mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating);
+
+	if (!test_bit(node, qs->qs_conn_bm))
+		o2quo_set_hold(qs, node);
+	else
+		o2quo_clear_hold(qs, node);
+
+	spin_unlock(&qs->qs_lock);
+}
+
+/* hb going down releases any holds we might have had due to this node from
+ * conn_up, conn_err, or hb_up */
+void o2quo_hb_down(u8 node)
+{
+	struct o2quo_state *qs = &o2quo_state;
+
+	spin_lock(&qs->qs_lock);
+
+	qs->qs_heartbeating--;
+	mlog_bug_on_msg(qs->qs_heartbeating < 0,
+			"node %u, %d heartbeating\n",
+			node, qs->qs_heartbeating);
+	mlog_bug_on_msg(!test_bit(node, qs->qs_hb_bm), "node %u\n", node);
+	clear_bit(node, qs->qs_hb_bm);
+
+	mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating);
+
+	o2quo_clear_hold(qs, node);
+
+	spin_unlock(&qs->qs_lock);
+}
+
+/* this tells us that we've decided that the node is still heartbeating
+ * even though we've lost its conn.  it must only be called after conn_err
+ * and indicates that we must now make a quorum decision in the future, 
+ * though we might be doing so after waiting for holds to drain.  Here
+ * we'll be dropping the hold from conn_err. */
+void o2quo_hb_still_up(u8 node)
+{
+	struct o2quo_state *qs = &o2quo_state;
+
+	spin_lock(&qs->qs_lock);
+
+	mlog(0, "node %u\n", node);
+
+	qs->qs_pending = 1;
+	o2quo_clear_hold(qs, node);
+
+	spin_unlock(&qs->qs_lock);
+}
+
+/* This is analogous to hb_up.  as a node's connection comes up we delay the
+ * quorum decision until we see it heartbeating.  the hold will be dropped in
+ * hb_up or hb_down.  it might be perpetuated by conn_err until hb_down.  if
+ * it's already heartbeating we might be dropping a hold that conn_up got.
+ * */
+void o2quo_conn_up(u8 node)
+{
+	struct o2quo_state *qs = &o2quo_state;
+
+	spin_lock(&qs->qs_lock);
+
+	qs->qs_connected++;
+	mlog_bug_on_msg(qs->qs_connected == O2NM_MAX_NODES,
+		        "node %u\n", node);
+	mlog_bug_on_msg(test_bit(node, qs->qs_conn_bm), "node %u\n", node);
+	set_bit(node, qs->qs_conn_bm);
+
+	mlog(0, "node %u, %d total\n", node, qs->qs_connected);
+
+	if (!test_bit(node, qs->qs_hb_bm))
+		o2quo_set_hold(qs, node);
+	else
+		o2quo_clear_hold(qs, node);
+
+	spin_unlock(&qs->qs_lock);
+}
+
+/* we've decided that we won't ever be connecting to the node again.  if it's
+ * still heartbeating we grab a hold that will delay decisions until either the
+ * node stops heartbeating from hb_down or the caller decides that the node is
+ * still up and calls still_up */
+void o2quo_conn_err(u8 node)
+{
+	struct o2quo_state *qs = &o2quo_state;
+
+	spin_lock(&qs->qs_lock);
+
+	qs->qs_connected--;
+	mlog_bug_on_msg(qs->qs_connected < 0,
+			"node %u, connected %d\n",
+			node, qs->qs_connected);
+	mlog_bug_on_msg(!test_bit(node, qs->qs_conn_bm), "node %u\n", node);
+	clear_bit(node, qs->qs_conn_bm);
+
+	mlog(0, "node %u, %d total\n", node, qs->qs_connected);
+
+	if (test_bit(node, qs->qs_hb_bm))
+		o2quo_set_hold(qs, node);
+
+	spin_unlock(&qs->qs_lock);
+}
+
+void o2quo_init(void)
+{
+	struct o2quo_state *qs = &o2quo_state;
+
+	spin_lock_init(&qs->qs_lock);
+	INIT_WORK(&qs->qs_work, o2quo_make_decision, NULL);
+}
+
+void o2quo_exit(void)
+{
+	flush_scheduled_work();
+}

Added: trunk/fs/ocfs2/cluster/quorum.h
===================================================================
--- trunk/fs/ocfs2/cluster/quorum.h	2005-06-30 01:51:32 UTC (rev 2450)
+++ trunk/fs/ocfs2/cluster/quorum.h	2005-07-01 16:49:20 UTC (rev 2451)
@@ -0,0 +1,35 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * Copyright (C) 2005 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ * 
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ */
+
+#ifndef O2CLUSTER_QUORUM_H
+#define O2CLUSTER_QUORUM_H
+
+void o2quo_init(void);
+void o2quo_exit(void);
+
+void o2quo_hb_up(u8 node);
+void o2quo_hb_down(u8 node);
+void o2quo_hb_still_up(u8 node);
+void o2quo_conn_up(u8 node);
+void o2quo_conn_err(u8 node);
+
+#endif /* O2CLUSTER_QUORUM_H */

Modified: trunk/fs/ocfs2/cluster/tcp.c
===================================================================
--- trunk/fs/ocfs2/cluster/tcp.c	2005-06-30 01:51:32 UTC (rev 2450)
+++ trunk/fs/ocfs2/cluster/tcp.c	2005-07-01 16:49:20 UTC (rev 2451)
@@ -55,7 +55,6 @@
 
 #include <linux/kernel.h>
 #include <linux/slab.h>
-#include <linux/kthread.h>
 #include <linux/kref.h>
 #include <net/tcp.h>
 
@@ -66,6 +65,7 @@
 #include "nodemanager.h"
 #define MLOG_MASK_PREFIX ML_TCP
 #include "masklog.h"
+#include "quorum.h"
 
 #include "tcp_internal.h"
 
@@ -108,7 +108,7 @@
  * destroying the work queue.
  */
 static struct workqueue_struct *o2net_wq;
-static struct work_struct o2net_listen_work, o2net_quorum_work;
+static struct work_struct o2net_listen_work;
 
 static struct o2hb_callback_func o2net_hb_up, o2net_hb_down;
 #define O2NET_HB_PRI 0x1
@@ -116,11 +116,6 @@
 static struct o2net_handshake *o2net_hand;
 static o2net_msg *o2net_keep_req, *o2net_keep_resp;
 
-/* these node totals include our node.  I think the hb and net threads
- * sufficiently serialize things so that these don't need locking */
-static int o2net_heartbeating_nodes = 0, o2net_connected_nodes = 0;
-static unsigned long o2net_connected_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
-
 static int o2net_sys_err_translations[O2NET_ERR_MAX] =
 		{[O2NET_ERR_NONE]	= 0,
 		 [O2NET_ERR_NO_HNDLR]	= -ENOPROTOOPT,
@@ -377,30 +372,12 @@
 		sc_put(sc);
 }
 
-static void o2net_mod_connected_nodes(u8 node, int delta)
-{
-	BUG_ON(delta == 0);
-	if (delta < 0) {
-		mlog_bug_on_msg(o2net_connected_nodes == 0,
-				"node %u delta %d\n", node, delta);
-		o2net_connected_nodes--;
-		clear_bit(node, o2net_connected_bitmap);
-	} else {
-		mlog_bug_on_msg(o2net_connected_nodes == O2NM_MAX_NODES,
-				"node %u delta %d\n", node, delta);
-		o2net_connected_nodes++;
-		set_bit(node, o2net_connected_bitmap);
-	}
-
-	mlog(ML_QUORUM, "node %u %sconnected, now %u connected\n", node,
-	     delta < 0 ? "dis" : "", o2net_connected_nodes);
-}
-
 static void o2net_set_nn_state(struct o2net_node *nn,
 			       struct o2net_sock_container *sc,
 			       unsigned valid, int err)
 {
 	int was_valid = nn->nn_sc_valid;
+	int was_err = nn->nn_persistent_error;
 	struct o2net_sock_container *old_sc = nn->nn_sc;
 
 	assert_spin_locked(&nn->nn_lock);
@@ -428,12 +405,13 @@
 	if (nn->nn_persistent_error || nn->nn_sc_valid)
 		wake_up(&nn->nn_sc_wq);
 
-	if (was_valid && !valid) {
-		o2net_mod_connected_nodes(old_sc->sc_node->nd_num, -1);
-		if (o2net_wq)
-			queue_delayed_work(o2net_wq, &o2net_quorum_work,
-				      msecs_to_jiffies(O2NET_QUORUM_DELAY_MS));
+	if (!was_err && nn->nn_persistent_error) {
+		o2quo_conn_err(o2net_num_from_nn(nn));
+		queue_delayed_work(o2net_wq, &nn->nn_still_up,
+				   O2NET_QUORUM_DELAY_MS);
+	}
 
+	if (was_valid && !valid) {
 		mlog(ML_NOTICE, "no longer connected to "
 		       "node %s at %u.%u.%u.%u:%d\n",
 		       old_sc->sc_node->nd_name,
@@ -443,13 +421,13 @@
 	}
 
 	if (!was_valid && valid) {
+		o2quo_conn_up(o2net_num_from_nn(nn));
 		/* this is a bit of a hack.  we only try reconnecting
 		 * when heartbeating starts until we get a connection.
 		 * if that connection then dies we don't try reconnecting.
 		 * the only way to start connecting again is to down
 		 * heartbeat and bring it back up. */
 		cancel_delayed_work(&nn->nn_connect_expired);
-		o2net_mod_connected_nodes(sc->sc_node->nd_num, 1);
 		mlog(ML_NOTICE, "%s node %s num %u at %u.%u.%u.%u:%d\n",
 		     o2nm_this_node() > sc->sc_node->nd_num ?
 		     	"connected to" : "accepted connection from",
@@ -1458,91 +1436,20 @@
 	spin_unlock(&nn->nn_lock);
 }
 
-/* ------------------------------------------------------------ */
-
-/* this work func is queued whenever a disconnection makes it such that
- * we have half or fewer nodes connected than we do heartbeating.  if by
- * the time we get here we still have that imbalance we decide that we
- * cannot reach the majority of the cluster and cut ourselves off.  this
- * work is delayed so that heartbeat has a chance to notice dead nodes
- * and account for them in the heartbeating count before getting here */
-static void o2net_check_quorum(void *arg)
+static void o2net_still_up(void *arg)
 {
-	int quorum;
-	unsigned long heartbeating[BITS_TO_LONGS(O2NM_MAX_NODES)];
-	int lowest_hb, lowest_reachable = 0, fence = 0;
+	struct o2net_node *nn = arg;
 
-	/* usually _callback would be used from a hb callback to avoid
-	 * deadlocking on the hb_callback_sem.  we might as well be
-	 * in a callback sem.  our hb callback func will flush the work
-	 * queue until we're finished and it holds the callback sem.. */
-	o2hb_fill_node_map_from_callback(heartbeating, sizeof(heartbeating));
-	lowest_hb = find_first_bit(heartbeating, O2NM_MAX_NODES);
-	if (lowest_hb != O2NM_MAX_NODES)
-		lowest_reachable = test_bit(lowest_hb, o2net_connected_bitmap);
-
-	mlog(ML_QUORUM, "heartbeating: %u, connected: %u, "
-	     "lowest: %u (%sreachable)\n", o2net_heartbeating_nodes,
-	     o2net_connected_nodes, lowest_hb,
-	     lowest_reachable ? "" : "un");
-
-	if (!o2hb_check_local_node_heartbeating_from_callback() ||
-	    o2net_heartbeating_nodes == 1)
-		goto out;
-
-	if (o2net_heartbeating_nodes & 1) {
-		/* the odd numbered cluster case is straight forward --
-		 * if we can't talk to the majority we're hosed */
-		quorum = (o2net_heartbeating_nodes + 1)/2;
-		if (o2net_connected_nodes < quorum) {
-			mlog(ML_ERROR, "fencing this node because it is "
-			     "only connected to %u nodes and %u is needed "
-			     "to make a quorum out of %u heartbeating nodes\n",
-			     o2net_connected_nodes, quorum, 
-			     o2net_heartbeating_nodes);
-			fence = 1;
-		}
-	} else {
-		/* the even numbered cluster adds the possibility of each half
-		 * of the cluster being able to talk amongst themselves.. in
-		 * that case we're hosed if we can't talk to the group that has
-		 * the lowest numbered node */
-		quorum = o2net_heartbeating_nodes / 2;
-		if (o2net_connected_nodes < quorum) {
-			mlog(ML_ERROR, "fencing this node because it is "
-			     "only connected to %u nodes and %u is needed "
-			     "to make a quorum out of %u heartbeating nodes\n",
-			     o2net_connected_nodes, quorum, 
-			     o2net_heartbeating_nodes);
-		}
-		else if ((o2net_connected_nodes == quorum) && 
-			 !lowest_reachable) {
-			mlog(ML_ERROR, "fencing this node because it is "
-			     "connected to a half-quorum of %u out of %u "
-			     "nodes which doesn't include the lowest active "
-			     "node %u\n", quorum, o2net_heartbeating_nodes,
-			     lowest_hb);
-			fence = 1;
-		}
-	}
-	
-out:
-	/* this is horribly heavy-handed.  It should instead flip the file
-	 * system RO and call some userspace script */
-	if (fence) {
-		/* panic spins with interrupts enabled.  with preempt
-		 * threads can still schedule, etc, etc */
-		o2hb_stop_all_regions();
-		panic("ocfs2 is very sorry to be fencing this system by "
-		      "panicing\n");
-	}
+	o2quo_hb_still_up(o2net_num_from_nn(nn));
 }
 
+/* ------------------------------------------------------------ */
+
 void o2net_disconnect_node(struct o2nm_node *node)
 {
 	struct o2net_node *nn = o2net_nn_from_num(node->nd_num);
 
-	/* don't reconnecting until it's heartbeating again */ 
+	/* don't reconnect until it's heartbeating again */ 
 	spin_lock(&nn->nn_lock);
 	o2net_set_nn_state(nn, NULL, 0, -ENOTCONN);
 	spin_unlock(&nn->nn_lock);
@@ -1550,6 +1457,7 @@
 	if (o2net_wq) {
 		cancel_delayed_work(&nn->nn_connect_expired);
 		cancel_delayed_work(&nn->nn_connect_work);
+		cancel_delayed_work(&nn->nn_still_up);
 		flush_workqueue(o2net_wq);
 	}
 }
@@ -1557,14 +1465,10 @@
 static void o2net_hb_node_down_cb(struct o2nm_node *node, int node_num,
 				  void *data)
 {
-	o2net_heartbeating_nodes--;
-	mlog(ML_QUORUM, "node %u, now %u heartbeating\n", node_num,
-	     o2net_heartbeating_nodes);
+	o2quo_hb_down(node_num);
 
 	if (node_num != o2nm_this_node())
 		o2net_disconnect_node(node);
-	else 
-		cancel_delayed_work(&o2net_quorum_work);
 }
 
 static void o2net_hb_node_up_cb(struct o2nm_node *node, int node_num,
@@ -1572,9 +1476,7 @@
 {
 	struct o2net_node *nn = o2net_nn_from_num(node_num);
 
-	o2net_heartbeating_nodes++;
-	mlog(ML_QUORUM, "node %u, now %u heartbeating\n", node_num,
-	     o2net_heartbeating_nodes);
+	o2quo_hb_up(node_num);
 
 	/* ensure an immediate connect attempt */
 	nn->nn_last_connect_attempt = jiffies - 
@@ -1861,7 +1763,7 @@
 		destroy_workqueue(o2net_wq);
 		o2net_wq = NULL;
 	} else 
-		o2net_mod_connected_nodes(node->nd_num, 1);
+		o2quo_conn_up(node->nd_num);
 
 	return ret;
 }
@@ -1892,14 +1794,13 @@
 	
 	/* finish all work and tear down the work queue */  
 	mlog(ML_KTHREAD, "waiting for o2net thread to exit....\n");
-	cancel_delayed_work(&o2net_quorum_work);
 	destroy_workqueue(o2net_wq);
 	o2net_wq = NULL;
 
 	sock_release(o2net_listen_sock);
 	o2net_listen_sock = NULL;
 
-	o2net_mod_connected_nodes(node->nd_num, -1);
+	o2quo_conn_err(node->nd_num);
 }
 
 /* ------------------------------------------------------------ */
@@ -1908,6 +1809,8 @@
 {
 	unsigned long i;
 
+	o2quo_init();
+
 	o2net_hand = kcalloc(1, sizeof(struct o2net_handshake), GFP_KERNEL);
 	o2net_keep_req = kcalloc(1, sizeof(o2net_msg), GFP_KERNEL);
 	o2net_keep_resp = kcalloc(1, sizeof(o2net_msg), GFP_KERNEL);
@@ -1924,14 +1827,13 @@
 	o2net_keep_req->magic = cpu_to_be16(O2NET_MSG_KEEP_REQ_MAGIC);
 	o2net_keep_resp->magic = cpu_to_be16(O2NET_MSG_KEEP_RESP_MAGIC);
 
-	INIT_WORK(&o2net_quorum_work, o2net_check_quorum, NULL);
-
 	for (i = 0; i < ARRAY_SIZE(o2net_nodes); i++) {
 		struct o2net_node *nn = o2net_nn_from_num(i);
 
 		spin_lock_init(&nn->nn_lock);
 		INIT_WORK(&nn->nn_connect_work, o2net_start_connect, nn);
 		INIT_WORK(&nn->nn_connect_expired, o2net_connect_expired, nn);
+		INIT_WORK(&nn->nn_still_up, o2net_still_up, nn);
 		/* until we see hb from a node we'll return einval */
 		nn->nn_persistent_error = -ENOTCONN;
 		init_waitqueue_head(&nn->nn_sc_wq);
@@ -1944,6 +1846,7 @@
 
 void o2net_exit(void)
 {
+	o2quo_exit();
 	kfree(o2net_hand);
 	kfree(o2net_keep_req);
 	kfree(o2net_keep_resp);

Modified: trunk/fs/ocfs2/cluster/tcp_internal.h
===================================================================
--- trunk/fs/ocfs2/cluster/tcp_internal.h	2005-06-30 01:51:32 UTC (rev 2450)
+++ trunk/fs/ocfs2/cluster/tcp_internal.h	2005-07-01 16:49:20 UTC (rev 2451)
@@ -79,6 +79,11 @@
 	 * established.  this expiring gives up on the node and errors out
 	 * transmits */
 	struct work_struct		nn_connect_expired;
+
+	/* after we give up on a socket we wait a while before deciding
+	 * that it is still heartbeating and that we should do some
+	 * quorum work */
+	struct work_struct		nn_still_up;
 };
 
 struct o2net_sock_container {



More information about the Ocfs2-commits mailing list