[Ocfs2-commits] zab commits r1876 - trunk/fs/ocfs2/cluster
svn-commits at oss.oracle.com
svn-commits at oss.oracle.com
Tue Feb 1 16:24:57 CST 2005
Author: zab
Date: 2005-02-01 16:24:55 -0600 (Tue, 01 Feb 2005)
New Revision: 1876
Modified:
trunk/fs/ocfs2/cluster/heartbeat.c
Log:
o simplify heartbeat thread, needs teardown help from usysfs
o replace racy hb callback state and locking with straight-forward semaphores
Modified: trunk/fs/ocfs2/cluster/heartbeat.c
===================================================================
--- trunk/fs/ocfs2/cluster/heartbeat.c 2005-02-01 20:34:36 UTC (rev 1875)
+++ trunk/fs/ocfs2/cluster/heartbeat.c 2005-02-01 22:24:55 UTC (rev 1876)
@@ -94,9 +94,7 @@
static int hb_do_disk_heartbeat(void *page);
static int hb_thread(void *data);
static void hb_complete_thread(void);
-static void hb_kick_thread(void);
static int hb_launch_thread(void);
-static inline int hb_wait_on_callback_state(int type);
@@ -104,17 +102,14 @@
static spinlock_t hb_lock = SPIN_LOCK_UNLOCKED;
static LIST_HEAD(hb_net_groups);
static LIST_HEAD(hb_disk_groups);
-static int hb_callback_state[HB_NUM_CB];
-struct list_head hb_callbacks[HB_NUM_CB];
-static spinlock_t hb_cb_lock = SPIN_LOCK_UNLOCKED;
static struct task_struct *hb_task = NULL;
-static atomic_t hb_thread_woken = ATOMIC_INIT(0);
-static DECLARE_WAIT_QUEUE_HEAD(hb_thread_wait_queue);
static struct completion hb_complete;
static int hb_pid = -1;
-static wait_queue_head_t hb_cb_wq;
-static atomic_t hb_cb_ready = ATOMIC_INIT(0);
+static struct hb_callback {
+ struct list_head list;
+ struct semaphore sem;
+} hb_callbacks[HB_NUM_CB];
//#if 0
#define hbprintk(x, arg...) printk("(hb:%d) " x, current->pid, ##arg)
@@ -376,7 +371,6 @@
static int hb_thread(void *data)
{
- int status;
void *page;
page = (void *) __get_free_page(GFP_KERNEL);
@@ -387,25 +381,11 @@
hb_task = current;
while (1) {
- status = hb_do_disk_heartbeat(page);
-
- atomic_set(&hb_thread_woken, 0);
- status = util_wait_atomic_eq(&hb_thread_wait_queue,
- &hb_thread_woken,
- 1, HB_THREAD_MS);
-
- if (status == 0 || status == -ETIMEDOUT) {
-#if 0
- if (atomic_read(&hb_thread_woken))
- hbprintk("aha!!! hb thread woken!\n");
- else
- hbprintk("hb thread timed out waiting, "
- "running again\n");
-#endif
- continue;
- }
- hbprintk("hb thread got %d while waiting\n", status);
- break;
+ hb_do_disk_heartbeat(page);
+ /* when we can really tear down this can wait on a wait
+ * queue */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(HB_THREAD_MS));
}
flush_scheduled_work();
@@ -414,13 +394,6 @@
return 0;
}
-
-static void hb_kick_thread(void)
-{
- atomic_set(&hb_thread_woken, 1);
- wake_up(&hb_thread_wait_queue);
-}
-
/* Launch the hb thread for the mounted volume */
static int hb_launch_thread(void)
{
@@ -773,25 +746,6 @@
spin_unlock(&hb_lock);
}
-enum {
- HB_CB_STATE_FROZEN = 0,
- HB_CB_STATE_READY
-};
-
-static inline int hb_wait_on_callback_state(int type)
-{
- while (hb_callback_state[type] == HB_CB_STATE_FROZEN) {
- spin_unlock(&hb_cb_lock);
- atomic_set(&hb_cb_ready, 0);
- if (util_wait_atomic_eq(&hb_cb_wq, &hb_cb_ready, 1, 0) ==
- -EINTR) {
- return -EINTR;
- }
- spin_lock(&hb_cb_lock);
- }
- return 0;
-}
-
static int __init init_hb(void)
{
int retval=-1, i;
@@ -805,11 +759,10 @@
//if (!hb_net_timestamps)
// goto done;
- for (i=HB_NODE_DOWN_CB; i<HB_NUM_CB; i++)
- INIT_LIST_HEAD(&hb_callbacks[i]);
- init_waitqueue_head(&hb_cb_wq);
- for (i=HB_NODE_DOWN_CB; i<HB_NUM_CB; i++)
- hb_callback_state[i] = HB_CB_STATE_READY;
+ for (i=HB_NODE_DOWN_CB; i<HB_NUM_CB; i++) {
+ INIT_LIST_HEAD(&hb_callbacks[i].list);
+ init_MUTEX(&hb_callbacks[i].sem);
+ }
if (nm_register_callback(NM_GROUP_NODE_DEL_CB, hb_nm_group_node_del_cb))
goto done;
@@ -832,14 +785,6 @@
static void __exit exit_hb(void)
{
- int i;
- spin_lock(&hb_cb_lock);
- for (i=HB_NODE_DOWN_CB; i<HB_NUM_CB; i++) {
- hb_wait_on_callback_state(i);
- hb_callback_state[i] = HB_CB_STATE_FROZEN;
- }
- spin_unlock(&hb_cb_lock);
-
hb_complete_thread();
hb_teardown();
unregister_filesystem(&hb_fs_type);
@@ -853,6 +798,9 @@
nm_unregister_callback(NM_NODE_ADD_CB, hb_nm_node_add_cb);
nm_unregister_callback(NM_GROUP_ADD_CB, hb_nm_group_add_cb);
remove_proc_entry("cluster/heartbeat", NULL);
+
+ /* XXX make sure that we're not being called from any more active
+ * nm callbacks, then teardown hb_callbacks */
//if (hb_net_timestamps)
// kfree(hb_net_timestamps);
}
@@ -897,14 +845,25 @@
}
EXPORT_SYMBOL(hb_fill_node_map);
+static struct hb_callback *hbcall_from_type(int type)
+{
+ if (type < HB_NODE_DOWN_CB || type >= HB_NUM_CB)
+ return ERR_PTR(-EINVAL);
+
+ return &hb_callbacks[type];
+}
+
int hb_register_callback(int type, hb_cb_func *func, void *data, int priority)
{
hb_callback_func *f, *tmp;
struct list_head *iter;
+ struct hb_callback *hbcall;
int ret;
- if (type < HB_NODE_DOWN_CB || type >= HB_NUM_CB)
- return -EINVAL;
+ hbcall = hbcall_from_type(type);
+ if (IS_ERR(hbcall))
+ return PTR_ERR(hbcall);
+
f = kmalloc(sizeof(hb_callback_func), GFP_KERNEL);
if (f == NULL)
return -ENOMEM;
@@ -912,26 +871,28 @@
f->func = func;
f->data = data;
f->priority = priority;
+ INIT_LIST_HEAD(&f->list);
- spin_lock(&hb_cb_lock);
- ret = hb_wait_on_callback_state(type);
- if (ret < 0) {
- spin_unlock(&hb_cb_lock);
- kfree(f);
- return ret;
- }
+ ret = down_interruptible(&hbcall->sem);
+ if (ret)
+ goto out;
- list_for_each(iter, &hb_callbacks[type]) {
+ list_for_each(iter, &hbcall->list) {
tmp = list_entry (iter, hb_callback_func, list);
if (priority < tmp->priority) {
list_add_tail(&f->list, iter);
- spin_unlock(&hb_cb_lock);
- return 0;
+ break;
}
}
- list_add_tail(&f->list, &hb_callbacks[type]);
- spin_unlock(&hb_cb_lock);
- return 0;
+ if (list_empty(&f->list))
+ list_add_tail(&f->list, &hbcall->list);
+
+ f = NULL;
+ up(&hbcall->sem);
+out:
+ if (f)
+ kfree(f);
+ return ret;
}
EXPORT_SYMBOL(hb_register_callback);
@@ -939,64 +900,55 @@
{
struct list_head *iter, *tmpiter;
int ret = -EINVAL;
- hb_callback_func *f;
+ hb_callback_func *f = NULL;
+ struct hb_callback *hbcall;
- if (type < HB_NODE_DOWN_CB || type >= HB_NUM_CB)
- return -EINVAL;
+ hbcall = hbcall_from_type(type);
+ if (IS_ERR(hbcall))
+ return PTR_ERR(hbcall);
- spin_lock(&hb_cb_lock);
- ret = hb_wait_on_callback_state(type);
- if (ret < 0) {
- spin_unlock(&hb_cb_lock);
- return ret;
- }
- hb_callback_state[type] = HB_CB_STATE_FROZEN;
- spin_unlock(&hb_cb_lock);
+ ret = down_interruptible(&hbcall->sem);
+ if (ret)
+ goto out;
- list_for_each_safe(iter, tmpiter, &hb_callbacks[type]) {
+ list_for_each_safe(iter, tmpiter, &hbcall->list) {
f = list_entry (iter, hb_callback_func, list);
if (f->func == func && f->data == data) {
list_del(&f->list);
- kfree(f);
ret = 0;
break;
}
}
- spin_lock(&hb_cb_lock);
- hb_callback_state[type] = HB_CB_STATE_READY;
- atomic_set(&hb_cb_ready, 1);
- wake_up(&hb_cb_wq);
- spin_unlock(&hb_cb_lock);
+ up(&hbcall->sem);
+
+out:
+ if (f)
+ kfree(f);
+
return ret;
}
EXPORT_SYMBOL(hb_unregister_callback);
-
static void hb_do_callbacks(int type, void *ptr1, void *ptr2, int idx)
{
struct list_head *iter;
hb_callback_func *f;
- int ret;
-
- spin_lock(&hb_cb_lock);
- ret = hb_wait_on_callback_state(type);
- if (ret < 0) {
- spin_unlock(&hb_cb_lock);
+ struct hb_callback *hbcall;
+
+ hbcall = hbcall_from_type(type);
+ if (IS_ERR(hbcall))
+ return;
+
+ if (down_interruptible(&hbcall->sem)) {
hbprintk("missed hb callback(%d) due to EINTR!\n", type);
return;
}
- hb_callback_state[type] = HB_CB_STATE_FROZEN;
- spin_unlock(&hb_cb_lock);
-
- list_for_each(iter, &hb_callbacks[type]) {
+
+ list_for_each(iter, &hbcall->list) {
f = list_entry (iter, hb_callback_func, list);
(f->func) (ptr1, ptr2, idx, f->data);
}
- spin_lock(&hb_cb_lock);
- hb_callback_state[type] = HB_CB_STATE_READY;
- atomic_set(&hb_cb_ready, 1);
- wake_up(&hb_cb_wq);
- spin_unlock(&hb_cb_lock);
+ up(&hbcall->sem);
}
More information about the Ocfs2-commits
mailing list