[Ocfs2-commits] manish commits r2352 - in trunk/fs/ocfs2: . cluster
dlm
svn-commits at oss.oracle.com
svn-commits at oss.oracle.com
Wed Jun 1 18:43:33 CDT 2005
Author: manish
Signed-off-by: mfasheh
Date: 2005-06-01 18:43:31 -0500 (Wed, 01 Jun 2005)
New Revision: 2352
Modified:
trunk/fs/ocfs2/cluster/heartbeat.c
trunk/fs/ocfs2/cluster/heartbeat.h
trunk/fs/ocfs2/cluster/nodemanager.c
trunk/fs/ocfs2/cluster/ocfs2_heartbeat.h
trunk/fs/ocfs2/cluster/tcp.c
trunk/fs/ocfs2/dlm/dlmcommon.h
trunk/fs/ocfs2/dlm/dlmdomain.c
trunk/fs/ocfs2/heartbeat.c
trunk/fs/ocfs2/ocfs.h
trunk/fs/ocfs2/super.c
Log:
Move heartbeat functions into our own namespace
Signed-off-by: mfasheh
Modified: trunk/fs/ocfs2/cluster/heartbeat.c
===================================================================
--- trunk/fs/ocfs2/cluster/heartbeat.c 2005-06-01 23:37:15 UTC (rev 2351)
+++ trunk/fs/ocfs2/cluster/heartbeat.c 2005-06-01 23:43:31 UTC (rev 2352)
@@ -42,39 +42,39 @@
* we've made sure that all callees can deal with being called concurrently
* from multiple hb region threads.
*/
-static DECLARE_RWSEM(hb_callback_sem);
+static DECLARE_RWSEM(o2hb_callback_sem);
/*
* multiple hb threads are watching multiple regions. A node is live
* whenever any of the threads sees activity from the node in its region.
*/
-static spinlock_t hb_live_lock = SPIN_LOCK_UNLOCKED;
-static struct list_head hb_live_slots[NM_MAX_NODES];
-static unsigned long hb_live_node_bitmap[BITS_TO_LONGS(NM_MAX_NODES)];
-static LIST_HEAD(hb_node_events);
+static spinlock_t o2hb_live_lock = SPIN_LOCK_UNLOCKED;
+static struct list_head o2hb_live_slots[NM_MAX_NODES];
+static unsigned long o2hb_live_node_bitmap[BITS_TO_LONGS(NM_MAX_NODES)];
+static LIST_HEAD(o2hb_node_events);
-static DECLARE_WAIT_QUEUE_HEAD(hb_steady_queue);
+static DECLARE_WAIT_QUEUE_HEAD(o2hb_steady_queue);
-static struct hb_callback {
+static struct o2hb_callback {
struct list_head list;
-} hb_callbacks[HB_NUM_CB];
+} o2hb_callbacks[O2HB_NUM_CB];
-static struct hb_callback *hbcall_from_type(enum hb_callback_type type);
+static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type);
-#define HB_THREAD_MS 2000 // every 2 seconds
+#define O2HB_THREAD_MS 2000 // every 2 seconds
-#define HB_DEFAULT_BLOCK_BITS 9
+#define O2HB_DEFAULT_BLOCK_BITS 9
-struct hb_node_event {
+struct o2hb_node_event {
struct list_head hn_item;
- enum hb_callback_type hn_event_type;
+ enum o2hb_callback_type hn_event_type;
struct nm_node *hn_node;
int hn_node_num;
};
-struct hb_disk_slot {
- struct hb_disk_heartbeat_block *ds_raw_block;
+struct o2hb_disk_slot {
+ struct o2hb_disk_heartbeat_block *ds_raw_block;
u8 ds_node_num;
unsigned long ds_last_time;
u16 ds_equal_samples;
@@ -84,7 +84,7 @@
/* each thread owns a region.. when we're asked to tear down the region
* we ask the thread to stop, who cleans up the region */
-struct hb_region {
+struct o2hb_region {
struct config_item hr_item;
/* protected by the hr_callback_sem */
@@ -101,7 +101,7 @@
struct page **hr_slot_data;
struct block_device *hr_bdev;
- struct hb_disk_slot *hr_slots;
+ struct o2hb_disk_slot *hr_slots;
/* let the person setting up hb wait for it to return until it
* has reached a 'steady' state. This will be fixed when we have
@@ -111,21 +111,21 @@
char hr_dev_name[BDEVNAME_SIZE];
};
-struct hb_bio_wait_ctxt {
+struct o2hb_bio_wait_ctxt {
atomic_t wc_num_reqs;
struct completion wc_io_complete;
};
-static inline void hb_bio_wait_init(struct hb_bio_wait_ctxt *wc,
- unsigned int num_ios)
+static inline void o2hb_bio_wait_init(struct o2hb_bio_wait_ctxt *wc,
+ unsigned int num_ios)
{
atomic_set(&wc->wc_num_reqs, num_ios);
init_completion(&wc->wc_io_complete);
}
/* Used in error paths too */
-static inline void hb_bio_wait_dec(struct hb_bio_wait_ctxt *wc,
- unsigned int num)
+static inline void o2hb_bio_wait_dec(struct o2hb_bio_wait_ctxt *wc,
+ unsigned int num)
{
/* sadly atomic_sub_and_test() isn't available on all platforms. The
* good news is that the fast path only completes one at a time */
@@ -137,8 +137,8 @@
}
}
-static void hb_wait_on_io(struct hb_region *reg,
- struct hb_bio_wait_ctxt *wc)
+static void o2hb_wait_on_io(struct o2hb_region *reg,
+ struct o2hb_bio_wait_ctxt *wc)
{
struct address_space *mapping = reg->hr_bdev->bd_inode->i_mapping;
@@ -147,11 +147,11 @@
wait_for_completion(&wc->wc_io_complete);
}
-static int hb_bio_end_io(struct bio *bio,
- unsigned int bytes_done,
- int error)
+static int o2hb_bio_end_io(struct bio *bio,
+ unsigned int bytes_done,
+ int error)
{
- struct hb_bio_wait_ctxt *wc = bio->bi_private;
+ struct o2hb_bio_wait_ctxt *wc = bio->bi_private;
if (error)
mlog(ML_ERROR, "IO Error %d\n", error);
@@ -159,16 +159,16 @@
if (bio->bi_size)
return 1;
- hb_bio_wait_dec(wc, 1);
+ o2hb_bio_wait_dec(wc, 1);
return 0;
}
/* Setup a Bio to cover I/O against num_slots slots starting at
* start_slot. */
-static struct bio * hb_setup_one_bio(struct hb_region *reg,
- struct hb_bio_wait_ctxt *wc,
- unsigned int start_slot,
- unsigned int num_slots)
+static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
+ struct o2hb_bio_wait_ctxt *wc,
+ unsigned int start_slot,
+ unsigned int num_slots)
{
int i, nr_vecs, len, first_page, last_page;
unsigned int vec_len, vec_start;
@@ -190,7 +190,7 @@
bio->bi_sector = (reg->hr_start_block + start_slot) << (bits - 9);
bio->bi_bdev = reg->hr_bdev;
bio->bi_private = wc;
- bio->bi_end_io = hb_bio_end_io;
+ bio->bi_end_io = o2hb_bio_end_io;
first_page = start_slot / spp;
last_page = first_page + nr_vecs;
@@ -255,10 +255,10 @@
return pow_two_sectors;
}
-static inline void hb_compute_request_limits(struct hb_region *reg,
- unsigned int num_slots,
- unsigned int *num_bios,
- unsigned int *slots_per_bio)
+static inline void o2hb_compute_request_limits(struct o2hb_region *reg,
+ unsigned int num_slots,
+ unsigned int *num_bios,
+ unsigned int *slots_per_bio)
{
unsigned int max_sectors, io_sectors;
@@ -276,16 +276,16 @@
*num_bios, *slots_per_bio);
}
-static int hb_read_slots(struct hb_region *reg,
- unsigned int max_slots)
+static int o2hb_read_slots(struct o2hb_region *reg,
+ unsigned int max_slots)
{
unsigned int num_bios, slots_per_bio, start_slot, num_slots;
int i, status;
- struct hb_bio_wait_ctxt wc;
+ struct o2hb_bio_wait_ctxt wc;
struct bio **bios;
struct bio *bio;
- hb_compute_request_limits(reg, max_slots, &num_bios, &slots_per_bio);
+ o2hb_compute_request_limits(reg, max_slots, &num_bios, &slots_per_bio);
bios = kcalloc(num_bios, sizeof(struct bio *), GFP_KERNEL);
if (!bios) {
@@ -294,7 +294,7 @@
return status;
}
- hb_bio_wait_init(&wc, num_bios);
+ o2hb_bio_wait_init(&wc, num_bios);
num_slots = slots_per_bio;
for(i = 0; i < num_bios; i++) {
@@ -304,9 +304,9 @@
if (max_slots < (start_slot + num_slots))
num_slots = max_slots - start_slot;
- bio = hb_setup_one_bio(reg, &wc, start_slot, num_slots);
+ bio = o2hb_setup_one_bio(reg, &wc, start_slot, num_slots);
if (IS_ERR(bio)) {
- hb_bio_wait_dec(&wc, num_bios - i);
+ o2hb_bio_wait_dec(&wc, num_bios - i);
status = PTR_ERR(bio);
mlog_errno(status);
@@ -319,8 +319,9 @@
}
status = 0;
+
bail_and_wait:
- hb_wait_on_io(reg, &wc);
+ o2hb_wait_on_io(reg, &wc);
if (bios) {
for(i = 0; i < num_bios; i++)
@@ -332,19 +333,19 @@
return status;
}
-static int hb_issue_node_write(struct hb_region *reg,
- struct bio **write_bio,
- struct hb_bio_wait_ctxt *write_wc)
+static int o2hb_issue_node_write(struct o2hb_region *reg,
+ struct bio **write_bio,
+ struct o2hb_bio_wait_ctxt *write_wc)
{
int status;
unsigned int slot;
struct bio *bio;
- hb_bio_wait_init(write_wc, 1);
+ o2hb_bio_wait_init(write_wc, 1);
slot = nm_this_node();
- bio = hb_setup_one_bio(reg, write_wc, slot, 1);
+ bio = o2hb_setup_one_bio(reg, write_wc, slot, 1);
if (IS_ERR(bio)) {
status = PTR_ERR(bio);
mlog_errno(status);
@@ -362,11 +363,11 @@
/* We want to make sure that nobody is heartbeating on top of us --
* this will help detect an invalid configuration. */
-static int hb_check_last_timestamp(struct hb_region *reg)
+static int o2hb_check_last_timestamp(struct o2hb_region *reg)
{
int node_num, ret;
- struct hb_disk_slot *slot;
- struct hb_disk_heartbeat_block *hb_block;
+ struct o2hb_disk_slot *slot;
+ struct o2hb_disk_heartbeat_block *hb_block;
node_num = nm_this_node();
@@ -383,17 +384,17 @@
return ret;
}
-static inline void hb_set_local_node_timestamp(struct hb_region *reg)
+static inline void o2hb_set_local_node_timestamp(struct o2hb_region *reg)
{
int node_num;
u64 cputime;
- struct hb_disk_slot *slot;
- struct hb_disk_heartbeat_block *hb_block;
+ struct o2hb_disk_slot *slot;
+ struct o2hb_disk_heartbeat_block *hb_block;
node_num = nm_this_node();
slot = ®->hr_slots[node_num];
- hb_block = (struct hb_disk_heartbeat_block *)slot->ds_raw_block;
+ hb_block = (struct o2hb_disk_heartbeat_block *)slot->ds_raw_block;
memset(hb_block, 0, reg->hr_block_bytes);
/* TODO: time stuff */
cputime = CURRENT_TIME.tv_sec;
@@ -403,48 +404,48 @@
hb_block->hb_node = node_num;
}
-static void hb_fire_callbacks(struct hb_callback *hbcall,
- struct nm_node *node,
- int idx)
+static void o2hb_fire_callbacks(struct o2hb_callback *hbcall,
+ struct nm_node *node,
+ int idx)
{
struct list_head *iter;
- struct hb_callback_func *f;
+ struct o2hb_callback_func *f;
list_for_each(iter, &hbcall->list) {
- f = list_entry(iter, struct hb_callback_func, hc_item);
+ f = list_entry(iter, struct o2hb_callback_func, hc_item);
(f->hc_func)(node, idx, f->hc_data);
}
}
/* Will run the list in order until we process the passed event */
-static void hb_run_event_list(struct hb_node_event *queued_event)
+static void o2hb_run_event_list(struct o2hb_node_event *queued_event)
{
int empty;
- struct hb_callback *hbcall;
- struct hb_node_event *event;
+ struct o2hb_callback *hbcall;
+ struct o2hb_node_event *event;
- spin_lock(&hb_live_lock);
+ spin_lock(&o2hb_live_lock);
empty = list_empty(&queued_event->hn_item);
- spin_unlock(&hb_live_lock);
+ spin_unlock(&o2hb_live_lock);
if (empty)
return;
/* Holding callback sem assures we don't alter the callback
* lists when doing this, and serializes ourselves with other
* processes wanting callbacks. */
- down_write(&hb_callback_sem);
+ down_write(&o2hb_callback_sem);
- spin_lock(&hb_live_lock);
- while (!list_empty(&hb_node_events)
+ spin_lock(&o2hb_live_lock);
+ while (!list_empty(&o2hb_node_events)
&& !list_empty(&queued_event->hn_item)) {
- event = list_entry(hb_node_events.next,
- struct hb_node_event,
+ event = list_entry(o2hb_node_events.next,
+ struct o2hb_node_event,
hn_item);
list_del_init(&event->hn_item);
- spin_unlock(&hb_live_lock);
+ spin_unlock(&o2hb_live_lock);
mlog(ML_HEARTBEAT, "Node %s event for %d\n",
- event->hn_event_type == HB_NODE_UP_CB ? "UP" : "DOWN",
+ event->hn_event_type == O2HB_NODE_UP_CB ? "UP" : "DOWN",
event->hn_node_num);
hbcall = hbcall_from_type(event->hn_event_type);
@@ -454,35 +455,35 @@
* to recover from. */
BUG_ON(IS_ERR(hbcall));
- hb_fire_callbacks(hbcall, event->hn_node, event->hn_node_num);
+ o2hb_fire_callbacks(hbcall, event->hn_node, event->hn_node_num);
- spin_lock(&hb_live_lock);
+ spin_lock(&o2hb_live_lock);
}
- spin_unlock(&hb_live_lock);
+ spin_unlock(&o2hb_live_lock);
- up_write(&hb_callback_sem);
+ up_write(&o2hb_callback_sem);
}
-static void hb_queue_node_event(struct hb_node_event *event,
- enum hb_callback_type type,
- struct nm_node *node,
- int node_num)
+static void o2hb_queue_node_event(struct o2hb_node_event *event,
+ enum o2hb_callback_type type,
+ struct nm_node *node,
+ int node_num)
{
- assert_spin_locked(&hb_live_lock);
+ assert_spin_locked(&o2hb_live_lock);
event->hn_event_type = type;
event->hn_node = node;
event->hn_node_num = node_num;
mlog(ML_HEARTBEAT, "Queue node %s event for node %d\n",
- type == HB_NODE_UP_CB ? "UP" : "DOWN", node_num);
+ type == O2HB_NODE_UP_CB ? "UP" : "DOWN", node_num);
- list_add_tail(&event->hn_item, &hb_node_events);
+ list_add_tail(&event->hn_item, &o2hb_node_events);
}
-static void hb_shutdown_slot(struct hb_disk_slot *slot)
+static void o2hb_shutdown_slot(struct o2hb_disk_slot *slot)
{
- struct hb_node_event event =
+ struct o2hb_node_event event =
{ .hn_item = LIST_HEAD_INIT(event.hn_item), };
struct nm_node *node;
@@ -490,34 +491,34 @@
if (!node)
return;
- spin_lock(&hb_live_lock);
+ spin_lock(&o2hb_live_lock);
if (!list_empty(&slot->ds_live_item)) {
mlog(ML_HEARTBEAT, "Shutdown, node %d leaves region\n",
slot->ds_node_num);
list_del_init(&slot->ds_live_item);
- if (list_empty(&hb_live_slots[slot->ds_node_num])) {
- clear_bit(slot->ds_node_num, hb_live_node_bitmap);
+ if (list_empty(&o2hb_live_slots[slot->ds_node_num])) {
+ clear_bit(slot->ds_node_num, o2hb_live_node_bitmap);
- hb_queue_node_event(&event, HB_NODE_DOWN_CB, node,
- slot->ds_node_num);
+ o2hb_queue_node_event(&event, O2HB_NODE_DOWN_CB, node,
+ slot->ds_node_num);
}
}
- spin_unlock(&hb_live_lock);
+ spin_unlock(&o2hb_live_lock);
- hb_run_event_list(&event);
+ o2hb_run_event_list(&event);
nm_node_put(node);
}
-static int hb_check_slot(struct hb_disk_slot *slot)
+static int o2hb_check_slot(struct o2hb_disk_slot *slot)
{
int changed = 0;
- struct hb_node_event event =
+ struct o2hb_node_event event =
{ .hn_item = LIST_HEAD_INIT(event.hn_item), };
struct nm_node *node;
- struct hb_disk_heartbeat_block *hb_block = slot->ds_raw_block;
+ struct o2hb_disk_heartbeat_block *hb_block = slot->ds_raw_block;
u64 cputime;
/* Is this correct? Do we assume that the node doesn't exist
@@ -535,26 +536,26 @@
slot->ds_equal_samples++;
slot->ds_last_time = cputime;
- spin_lock(&hb_live_lock);
+ spin_lock(&o2hb_live_lock);
/* dead nodes only come to life after some number of
* changes at any time during their dead time */
if (list_empty(&slot->ds_live_item) &&
- slot->ds_changed_samples >= HB_LIVE_THRESHOLD) {
+ slot->ds_changed_samples >= O2HB_LIVE_THRESHOLD) {
mlog(ML_HEARTBEAT, "Node %d joined my region\n",
slot->ds_node_num);
/* first on the list generates a callback */
- if (list_empty(&hb_live_slots[slot->ds_node_num])) {
- set_bit(slot->ds_node_num, hb_live_node_bitmap);
+ if (list_empty(&o2hb_live_slots[slot->ds_node_num])) {
+ set_bit(slot->ds_node_num, o2hb_live_node_bitmap);
- hb_queue_node_event(&event, HB_NODE_UP_CB, node,
- slot->ds_node_num);
+ o2hb_queue_node_event(&event, O2HB_NODE_UP_CB, node,
+ slot->ds_node_num);
changed = 1;
}
list_add_tail(&slot->ds_live_item,
- &hb_live_slots[slot->ds_node_num]);
+ &o2hb_live_slots[slot->ds_node_num]);
slot->ds_equal_samples = 0;
goto out;
@@ -567,17 +568,17 @@
/* live nodes only go dead after enough consequtive missed
* samples.. reset the missed counter whenever we see
* activity */
- if (slot->ds_equal_samples >= HB_DEAD_THRESHOLD) {
+ if (slot->ds_equal_samples >= O2HB_DEAD_THRESHOLD) {
mlog(ML_HEARTBEAT, "Node %d left my region\n",
slot->ds_node_num);
/* last off the live_slot generates a callback */
list_del_init(&slot->ds_live_item);
- if (list_empty(&hb_live_slots[slot->ds_node_num])) {
- clear_bit(slot->ds_node_num, hb_live_node_bitmap);
+ if (list_empty(&o2hb_live_slots[slot->ds_node_num])) {
+ clear_bit(slot->ds_node_num, o2hb_live_node_bitmap);
- hb_queue_node_event(&event, HB_NODE_DOWN_CB, node,
- slot->ds_node_num);
+ o2hb_queue_node_event(&event, O2HB_NODE_DOWN_CB, node,
+ slot->ds_node_num);
changed = 1;
}
@@ -590,9 +591,9 @@
slot->ds_equal_samples = 0;
}
out:
- spin_unlock(&hb_live_lock);
+ spin_unlock(&o2hb_live_lock);
- hb_run_event_list(&event);
+ o2hb_run_event_list(&event);
nm_node_put(node);
return changed;
@@ -600,8 +601,8 @@
/* This could be faster if we just implmented a find_last_bit, but I
* don't think the circumstances warrant it. */
-static int hb_highest_node(unsigned long *nodes,
- int numbits)
+static int o2hb_highest_node(unsigned long *nodes,
+ int numbits)
{
int highest, node;
@@ -617,17 +618,17 @@
return highest;
}
-static void hb_do_disk_heartbeat(struct hb_region *reg)
+static void o2hb_do_disk_heartbeat(struct o2hb_region *reg)
{
int i, ret, highest_node, change = 0;
unsigned long configured_nodes[BITS_TO_LONGS(NM_MAX_NODES)];
struct bio *write_bio;
- struct hb_bio_wait_ctxt write_wc;
+ struct o2hb_bio_wait_ctxt write_wc;
if (nm_configured_node_map(configured_nodes, sizeof(configured_nodes)))
return;
- highest_node = hb_highest_node(configured_nodes, NM_MAX_NODES);
+ highest_node = o2hb_highest_node(configured_nodes, NM_MAX_NODES);
if (highest_node >= NM_MAX_NODES) {
mlog(ML_NOTICE, "ocfs2_heartbeat: no configured nodes found!\n");
return;
@@ -637,7 +638,7 @@
* yet. Of course, if the node definitions have holes in them
* then we're reading an empty slot anyway... Consider this
* best-effort. */
- ret = hb_read_slots(reg, highest_node + 1);
+ ret = o2hb_read_slots(reg, highest_node + 1);
if (ret < 0) {
mlog_errno(ret);
return;
@@ -646,16 +647,16 @@
/* With an up to date view of the slots, we can check that no
* other node has been improperly configured to heartbeat in
* our slot. */
- if (!hb_check_last_timestamp(reg))
+ if (!o2hb_check_last_timestamp(reg))
mlog(ML_ERROR, "Device \"%s\": another node is heartbeating "
"in our slot!\n", reg->hr_dev_name);
/* Set our raw timestamp */
- hb_set_local_node_timestamp(reg);
+ o2hb_set_local_node_timestamp(reg);
/* And fire off the write. Note that we don't wait on this I/O
* until later. */
- ret = hb_issue_node_write(reg, &write_bio, &write_wc);
+ ret = o2hb_issue_node_write(reg, &write_bio, &write_wc);
if (ret < 0) {
mlog_errno(ret);
return;
@@ -664,17 +665,17 @@
i = -1;
while((i = find_next_bit(configured_nodes, NM_MAX_NODES, i + 1)) < NM_MAX_NODES) {
- change |= hb_check_slot(®->hr_slots[i]);
+ change |= o2hb_check_slot(®->hr_slots[i]);
}
/* let the person who launched us know when things are steady */
if (!change && (atomic_read(®->hr_steady_iterations) != 0)) {
if (atomic_dec_and_test(®->hr_steady_iterations))
- wake_up(&hb_steady_queue);
+ wake_up(&o2hb_steady_queue);
}
/* Make sure the write hits disk before we return. */
- hb_wait_on_io(reg, &write_wc);
+ o2hb_wait_on_io(reg, &write_wc);
bio_put(write_bio);
}
@@ -683,79 +684,79 @@
* dir is removed and drops it ref it will wait to tear down this
* thread.
*/
-static int hb_thread(void *data)
+static int o2hb_thread(void *data)
{
- struct hb_region *reg = data;
+ struct o2hb_region *reg = data;
int i;
mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread running\n");
while (!kthread_should_stop()) {
- hb_do_disk_heartbeat(reg);
+ o2hb_do_disk_heartbeat(reg);
/* the kthread api has blocked signals for us so no
* need to record the return value. */
- msleep_interruptible(HB_THREAD_MS);
+ msleep_interruptible(O2HB_THREAD_MS);
}
for(i = 0; i < reg->hr_blocks; i++)
- hb_shutdown_slot(®->hr_slots[i]);
+ o2hb_shutdown_slot(®->hr_slots[i]);
mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread exiting\n");
return 0;
}
-void hb_init(void)
+void o2hb_init(void)
{
int i;
- for (i = 0; i < ARRAY_SIZE(hb_callbacks); i++)
- INIT_LIST_HEAD(&hb_callbacks[i].list);
+ for (i = 0; i < ARRAY_SIZE(o2hb_callbacks); i++)
+ INIT_LIST_HEAD(&o2hb_callbacks[i].list);
- for (i = 0; i < ARRAY_SIZE(hb_live_slots); i++)
- INIT_LIST_HEAD(&hb_live_slots[i]);
+ for (i = 0; i < ARRAY_SIZE(o2hb_live_slots); i++)
+ INIT_LIST_HEAD(&o2hb_live_slots[i]);
- INIT_LIST_HEAD(&hb_node_events);
+ INIT_LIST_HEAD(&o2hb_node_events);
- memset(hb_live_node_bitmap, 0, sizeof(hb_live_node_bitmap));
+ memset(o2hb_live_node_bitmap, 0, sizeof(o2hb_live_node_bitmap));
}
/*
* get a map of all nodes that are heartbeating in any regions
*/
-void hb_fill_node_map(unsigned long *map, unsigned bytes)
+void o2hb_fill_node_map(unsigned long *map, unsigned bytes)
{
BUG_ON(bytes < (BITS_TO_LONGS(NM_MAX_NODES) * sizeof(unsigned long)));
/* callers want to serialize this map and callbacks so that they
* can trust that they don't miss nodes coming to the party */
- down_read(&hb_callback_sem);
- spin_lock(&hb_live_lock);
- memcpy(map, &hb_live_node_bitmap, bytes);
- spin_unlock(&hb_live_lock);
- up_read(&hb_callback_sem);
+ down_read(&o2hb_callback_sem);
+ spin_lock(&o2hb_live_lock);
+ memcpy(map, &o2hb_live_node_bitmap, bytes);
+ spin_unlock(&o2hb_live_lock);
+ up_read(&o2hb_callback_sem);
}
-EXPORT_SYMBOL_GPL(hb_fill_node_map);
+EXPORT_SYMBOL_GPL(o2hb_fill_node_map);
/*
* heartbeat configfs bits. The heartbeat set is a default set under
* the cluster set in nodemanager.c.
*/
-static struct hb_region *to_hb_region(struct config_item *item)
+static struct o2hb_region *to_o2hb_region(struct config_item *item)
{
- return item ? container_of(item, struct hb_region, hr_item) : NULL;
+ return item ? container_of(item, struct o2hb_region, hr_item) : NULL;
}
/* drop_item only drops its ref after killing the thread, nothing should
* be using the region anymore. this has to clean up any state that
* attributes might have built up. */
-static void hb_region_release(struct config_item *item)
+static void o2hb_region_release(struct config_item *item)
{
int i;
struct page *page;
- struct hb_region *reg = to_hb_region(item);
+ struct o2hb_region *reg = to_o2hb_region(item);
if (reg->hr_slot_data) {
for (i = 0; i < reg->hr_num_pages; i++) {
@@ -774,11 +775,11 @@
kfree(reg);
}
-static int hb_read_block_input(struct hb_region *reg,
- const char *page,
- size_t count,
- unsigned long *ret_bytes,
- unsigned int *ret_bits)
+static int o2hb_read_block_input(struct o2hb_region *reg,
+ const char *page,
+ size_t count,
+ unsigned long *ret_bytes,
+ unsigned int *ret_bits)
{
unsigned long bytes;
char *p = (char *)page;
@@ -801,14 +802,15 @@
return 0;
}
-static ssize_t hb_region_block_bytes_read(struct hb_region *reg, char *page)
+static ssize_t o2hb_region_block_bytes_read(struct o2hb_region *reg,
+ char *page)
{
return sprintf(page, "%u\n", reg->hr_block_bytes);
}
-static ssize_t hb_region_block_bytes_write(struct hb_region *reg,
- const char *page,
- size_t count)
+static ssize_t o2hb_region_block_bytes_write(struct o2hb_region *reg,
+ const char *page,
+ size_t count)
{
int status;
unsigned long block_bytes;
@@ -817,8 +819,8 @@
if (reg->hr_bdev)
return -EINVAL;
- status = hb_read_block_input(reg, page, count,
- &block_bytes, &block_bits);
+ status = o2hb_read_block_input(reg, page, count,
+ &block_bytes, &block_bits);
if (status)
return status;
@@ -828,14 +830,15 @@
return count;
}
-static ssize_t hb_region_start_block_read(struct hb_region *reg, char *page)
+static ssize_t o2hb_region_start_block_read(struct o2hb_region *reg,
+ char *page)
{
return sprintf(page, "%llu\n", reg->hr_start_block);
}
-static ssize_t hb_region_start_block_write(struct hb_region *reg,
- const char *page,
- size_t count)
+static ssize_t o2hb_region_start_block_write(struct o2hb_region *reg,
+ const char *page,
+ size_t count)
{
unsigned long long tmp;
char *p = (char *)page;
@@ -852,14 +855,15 @@
return count;
}
-static ssize_t hb_region_blocks_read(struct hb_region *reg, char *page)
+static ssize_t o2hb_region_blocks_read(struct o2hb_region *reg,
+ char *page)
{
return sprintf(page, "%d\n", reg->hr_blocks);
}
-static ssize_t hb_region_blocks_write(struct hb_region *reg,
- const char *page,
- size_t count)
+static ssize_t o2hb_region_blocks_write(struct o2hb_region *reg,
+ const char *page,
+ size_t count)
{
unsigned long tmp;
char *p = (char *)page;
@@ -879,7 +883,8 @@
return count;
}
-static ssize_t hb_region_dev_read(struct hb_region *reg, char *page)
+static ssize_t o2hb_region_dev_read(struct o2hb_region *reg,
+ char *page)
{
unsigned int ret = 0;
@@ -889,7 +894,7 @@
return ret;
}
-static void hb_init_region_params(struct hb_region *reg)
+static void o2hb_init_region_params(struct o2hb_region *reg)
{
reg->hr_slots_per_page = PAGE_CACHE_SIZE >> reg->hr_block_bits;
@@ -899,17 +904,17 @@
reg->hr_block_bytes, reg->hr_block_bits);
}
-static int hb_map_slot_data(struct hb_region *reg)
+static int o2hb_map_slot_data(struct o2hb_region *reg)
{
int i, j;
unsigned int last_slot;
unsigned int spp = reg->hr_slots_per_page;
struct page *page;
char *raw;
- struct hb_disk_slot *slot;
+ struct o2hb_disk_slot *slot;
reg->hr_slots = kcalloc(reg->hr_blocks,
- sizeof(struct hb_disk_slot), GFP_KERNEL);
+ sizeof(struct o2hb_disk_slot), GFP_KERNEL);
if (reg->hr_slots == NULL) {
mlog_errno(-ENOMEM);
return -ENOMEM;
@@ -923,8 +928,9 @@
}
reg->hr_num_pages = (reg->hr_blocks + spp - 1) / spp;
- mlog(ML_HEARTBEAT, "Going to require %u pages to cover %u blocks at %u blocks "
- "per page\n", reg->hr_num_pages, reg->hr_blocks, spp);
+ mlog(ML_HEARTBEAT, "Going to require %u pages to cover %u blocks "
+ "at %u blocks per page\n",
+ reg->hr_num_pages, reg->hr_blocks, spp);
reg->hr_slot_data = kcalloc(reg->hr_num_pages, sizeof(struct page *),
GFP_KERNEL);
@@ -951,7 +957,7 @@
slot = ®->hr_slots[j + last_slot];
slot->ds_raw_block =
- (struct hb_disk_heartbeat_block *) raw;
+ (struct o2hb_disk_heartbeat_block *) raw;
raw += reg->hr_block_bytes;
}
@@ -961,8 +967,9 @@
}
/* this is acting as commit; we set up all of hr_bdev and hr_task or nothing */
-static ssize_t hb_region_dev_write(struct hb_region *reg, const char *page,
- size_t count)
+static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
+ const char *page,
+ size_t count)
{
long fd;
int sectsize;
@@ -1015,9 +1022,9 @@
goto out;
}
- hb_init_region_params(reg);
+ o2hb_init_region_params(reg);
- ret = hb_map_slot_data(reg);
+ ret = o2hb_map_slot_data(reg);
if (ret) {
mlog_errno(ret);
goto out;
@@ -1025,7 +1032,7 @@
atomic_set(®->hr_steady_iterations, 2);
- reg->hr_task = kthread_run(hb_thread, reg, "hb-%s",
+ reg->hr_task = kthread_run(o2hb_thread, reg, "o2hb-%s",
reg->hr_item.ci_name);
if (IS_ERR(reg->hr_task)) {
ret = PTR_ERR(reg->hr_task);
@@ -1034,7 +1041,7 @@
goto out;
}
- ret = wait_event_interruptible(hb_steady_queue,
+ ret = wait_event_interruptible(o2hb_steady_queue,
atomic_read(®->hr_steady_iterations) == 0);
if (ret) {
kthread_stop(reg->hr_task);
@@ -1056,106 +1063,118 @@
}
return ret;
}
-struct hb_region_attribute {
+
+struct o2hb_region_attribute {
struct configfs_attribute attr;
- ssize_t (*show)(struct hb_region *, char *);
- ssize_t (*store)(struct hb_region *, const char *, size_t);
+ ssize_t (*show)(struct o2hb_region *, char *);
+ ssize_t (*store)(struct o2hb_region *, const char *, size_t);
};
-static struct hb_region_attribute hb_region_attr_block_bytes = {
- .attr = { .ca_owner = THIS_MODULE, .ca_name = "block_bytes", .ca_mode = S_IRUGO | S_IWUSR },
- .show = hb_region_block_bytes_read,
- .store = hb_region_block_bytes_write,
+static struct o2hb_region_attribute o2hb_region_attr_block_bytes = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "block_bytes",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = o2hb_region_block_bytes_read,
+ .store = o2hb_region_block_bytes_write,
};
-static struct hb_region_attribute hb_region_attr_start_block = {
- .attr = { .ca_owner = THIS_MODULE, .ca_name = "start_block", .ca_mode = S_IRUGO | S_IWUSR },
- .show = hb_region_start_block_read,
- .store = hb_region_start_block_write,
+
+static struct o2hb_region_attribute o2hb_region_attr_start_block = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "start_block",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = o2hb_region_start_block_read,
+ .store = o2hb_region_start_block_write,
};
-static struct hb_region_attribute hb_region_attr_blocks = {
- .attr = { .ca_owner = THIS_MODULE, .ca_name = "blocks", .ca_mode = S_IRUGO | S_IWUSR },
- .show = hb_region_blocks_read,
- .store = hb_region_blocks_write,
+
+static struct o2hb_region_attribute o2hb_region_attr_blocks = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "blocks",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = o2hb_region_blocks_read,
+ .store = o2hb_region_blocks_write,
};
-static struct hb_region_attribute hb_region_attr_dev = {
- .attr = { .ca_owner = THIS_MODULE, .ca_name = "dev", .ca_mode = S_IRUGO | S_IWUSR },
- .show = hb_region_dev_read,
- .store = hb_region_dev_write,
+
+static struct o2hb_region_attribute o2hb_region_attr_dev = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "dev",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = o2hb_region_dev_read,
+ .store = o2hb_region_dev_write,
};
-static struct configfs_attribute *hb_region_attrs[] = {
- &hb_region_attr_block_bytes.attr,
- &hb_region_attr_start_block.attr,
- &hb_region_attr_blocks.attr,
- &hb_region_attr_dev.attr,
+static struct configfs_attribute *o2hb_region_attrs[] = {
+ &o2hb_region_attr_block_bytes.attr,
+ &o2hb_region_attr_start_block.attr,
+ &o2hb_region_attr_blocks.attr,
+ &o2hb_region_attr_dev.attr,
NULL,
};
-static ssize_t hb_region_show(struct config_item *item,
- struct configfs_attribute *attr,
- char *page)
+static ssize_t o2hb_region_show(struct config_item *item,
+ struct configfs_attribute *attr,
+ char *page)
{
- struct hb_region *reg = to_hb_region(item);
- struct hb_region_attribute *hb_region_attr =
- container_of(attr, struct hb_region_attribute, attr);
+ struct o2hb_region *reg = to_o2hb_region(item);
+ struct o2hb_region_attribute *o2hb_region_attr =
+ container_of(attr, struct o2hb_region_attribute, attr);
ssize_t ret = 0;
- if (hb_region_attr->show)
- ret = hb_region_attr->show(reg, page);
+ if (o2hb_region_attr->show)
+ ret = o2hb_region_attr->show(reg, page);
return ret;
}
-static ssize_t hb_region_store(struct config_item *item,
- struct configfs_attribute *attr,
- const char *page, size_t count)
+static ssize_t o2hb_region_store(struct config_item *item,
+ struct configfs_attribute *attr,
+ const char *page, size_t count)
{
- struct hb_region *reg = to_hb_region(item);
- struct hb_region_attribute *hb_region_attr =
- container_of(attr, struct hb_region_attribute, attr);
+ struct o2hb_region *reg = to_o2hb_region(item);
+ struct o2hb_region_attribute *o2hb_region_attr =
+ container_of(attr, struct o2hb_region_attribute, attr);
ssize_t ret = -EINVAL;
- if (hb_region_attr->store)
- ret = hb_region_attr->store(reg, page, count);
+ if (o2hb_region_attr->store)
+ ret = o2hb_region_attr->store(reg, page, count);
return ret;
}
-static struct configfs_item_operations hb_region_item_ops = {
- .release = hb_region_release,
- .show_attribute = hb_region_show,
- .store_attribute = hb_region_store,
+static struct configfs_item_operations o2hb_region_item_ops = {
+ .release = o2hb_region_release,
+ .show_attribute = o2hb_region_show,
+ .store_attribute = o2hb_region_store,
};
-static struct config_item_type hb_region_type = {
- .ct_item_ops = &hb_region_item_ops,
- .ct_attrs = hb_region_attrs,
+static struct config_item_type o2hb_region_type = {
+ .ct_item_ops = &o2hb_region_item_ops,
+ .ct_attrs = o2hb_region_attrs,
.ct_owner = THIS_MODULE,
};
/* heartbeat set */
-struct hb_heartbeat_group {
+struct o2hb_heartbeat_group {
struct config_group hs_group;
/* some stuff? */
};
-static struct hb_heartbeat_group *to_hb_heartbeat_group(struct config_group *group)
+static struct o2hb_heartbeat_group *to_o2hb_heartbeat_group(struct config_group *group)
{
return group ?
- container_of(group, struct hb_heartbeat_group, hs_group)
+ container_of(group, struct o2hb_heartbeat_group, hs_group)
: NULL;
}
-static struct config_item *hb_heartbeat_group_make_item(struct config_group *group,
- const char *name)
+static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *group,
+ const char *name)
{
- struct hb_region *reg = NULL;
+ struct o2hb_region *reg = NULL;
struct config_item *ret = NULL;
- reg = kcalloc(1, sizeof(struct hb_region), GFP_KERNEL);
+ reg = kcalloc(1, sizeof(struct o2hb_region), GFP_KERNEL);
if (reg == NULL)
goto out; /* ENOMEM */
- config_item_init_type_name(®->hr_item, name, &hb_region_type);
+ config_item_init_type_name(®->hr_item, name, &o2hb_region_type);
ret = ®->hr_item;
out:
@@ -1165,10 +1184,10 @@
return ret;
}
-static void hb_heartbeat_group_drop_item(struct config_group *group,
- struct config_item *item)
+static void o2hb_heartbeat_group_drop_item(struct config_group *group,
+ struct config_item *item)
{
- struct hb_region *reg = to_hb_region(item);
+ struct o2hb_region *reg = to_o2hb_region(item);
/* stop the thread when the user removes the region dir */
if (reg->hr_task) {
@@ -1179,29 +1198,29 @@
config_item_put(item);
}
-static struct configfs_group_operations hb_heartbeat_group_group_ops = {
- .make_item = hb_heartbeat_group_make_item,
- .drop_item = hb_heartbeat_group_drop_item,
+static struct configfs_group_operations o2hb_heartbeat_group_group_ops = {
+ .make_item = o2hb_heartbeat_group_make_item,
+ .drop_item = o2hb_heartbeat_group_drop_item,
};
-static struct config_item_type hb_heartbeat_group_type = {
- .ct_group_ops = &hb_heartbeat_group_group_ops,
+static struct config_item_type o2hb_heartbeat_group_type = {
+ .ct_group_ops = &o2hb_heartbeat_group_group_ops,
.ct_owner = THIS_MODULE,
};
/* this is just here to avoid touching group in heartbeat.h which the
* entire damn world #includes */
-struct config_group *hb_alloc_hb_set(void)
+struct config_group *o2hb_alloc_hb_set(void)
{
- struct hb_heartbeat_group *hs = NULL;
+ struct o2hb_heartbeat_group *hs = NULL;
struct config_group *ret = NULL;
- hs = kcalloc(1, sizeof(struct hb_heartbeat_group), GFP_KERNEL);
+ hs = kcalloc(1, sizeof(struct o2hb_heartbeat_group), GFP_KERNEL);
if (hs == NULL)
goto out;
config_group_init_type_name(&hs->hs_group, "heartbeat",
- &hb_heartbeat_group_type);
+ &o2hb_heartbeat_group_type);
ret = &hs->hs_group;
out:
@@ -1210,54 +1229,54 @@
return ret;
}
-void hb_free_hb_set(struct config_group *group)
+void o2hb_free_hb_set(struct config_group *group)
{
- struct hb_heartbeat_group *hs = to_hb_heartbeat_group(group);
+ struct o2hb_heartbeat_group *hs = to_o2hb_heartbeat_group(group);
kfree(hs);
}
/* hb callback registration and issuing */
-static struct hb_callback *hbcall_from_type(enum hb_callback_type type)
+static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type)
{
- if (type == HB_NUM_CB)
+ if (type == O2HB_NUM_CB)
return ERR_PTR(-EINVAL);
- return &hb_callbacks[type];
+ return &o2hb_callbacks[type];
}
-void hb_setup_callback(struct hb_callback_func *hc,
- enum hb_callback_type type,
- hb_cb_func *func,
- void *data,
- int priority)
+void o2hb_setup_callback(struct o2hb_callback_func *hc,
+ enum o2hb_callback_type type,
+ o2hb_cb_func *func,
+ void *data,
+ int priority)
{
INIT_LIST_HEAD(&hc->hc_item);
hc->hc_func = func;
hc->hc_data = data;
hc->hc_priority = priority;
hc->hc_type = type;
- hc->hc_magic = HB_CB_MAGIC;
+ hc->hc_magic = O2HB_CB_MAGIC;
}
-EXPORT_SYMBOL_GPL(hb_setup_callback);
+EXPORT_SYMBOL_GPL(o2hb_setup_callback);
-int hb_register_callback(struct hb_callback_func *hc)
+int o2hb_register_callback(struct o2hb_callback_func *hc)
{
- struct hb_callback_func *tmp;
+ struct o2hb_callback_func *tmp;
struct list_head *iter;
- struct hb_callback *hbcall;
+ struct o2hb_callback *hbcall;
- BUG_ON(hc->hc_magic != HB_CB_MAGIC);
+ BUG_ON(hc->hc_magic != O2HB_CB_MAGIC);
BUG_ON(!list_empty(&hc->hc_item));
hbcall = hbcall_from_type(hc->hc_type);
if (IS_ERR(hbcall))
return PTR_ERR(hbcall);
- down_write(&hb_callback_sem);
+ down_write(&o2hb_callback_sem);
list_for_each(iter, &hbcall->list) {
- tmp = list_entry(iter, struct hb_callback_func, hc_item);
+ tmp = list_entry(iter, struct o2hb_callback_func, hc_item);
if (hc->hc_priority < tmp->hc_priority) {
list_add_tail(&hc->hc_item, iter);
break;
@@ -1266,32 +1285,32 @@
if (list_empty(&hc->hc_item))
list_add_tail(&hc->hc_item, &hbcall->list);
- up_write(&hb_callback_sem);
+ up_write(&o2hb_callback_sem);
return 0;
}
-EXPORT_SYMBOL_GPL(hb_register_callback);
+EXPORT_SYMBOL_GPL(o2hb_register_callback);
-int hb_unregister_callback(struct hb_callback_func *hc)
+int o2hb_unregister_callback(struct o2hb_callback_func *hc)
{
- BUG_ON(hc->hc_magic != HB_CB_MAGIC);
+ BUG_ON(hc->hc_magic != O2HB_CB_MAGIC);
if (list_empty(&hc->hc_item))
return 0;
- down_write(&hb_callback_sem);
+ down_write(&o2hb_callback_sem);
list_del_init(&hc->hc_item);
- up_write(&hb_callback_sem);
+ up_write(&o2hb_callback_sem);
return 0;
}
-EXPORT_SYMBOL_GPL(hb_unregister_callback);
+EXPORT_SYMBOL_GPL(o2hb_unregister_callback);
/* Makes sure our local node is configured with a node number, and is
* heartbeating. */
-int hb_check_local_node_heartbeating(void)
+int o2hb_check_local_node_heartbeating(void)
{
unsigned long testing_map[BITS_TO_LONGS(NM_MAX_NODES)];
u8 node_num;
@@ -1303,7 +1322,7 @@
return 0;
}
- hb_fill_node_map(testing_map, sizeof(testing_map));
+ o2hb_fill_node_map(testing_map, sizeof(testing_map));
if (!test_bit(node_num, testing_map)) {
mlog(ML_HEARTBEAT,
@@ -1314,4 +1333,4 @@
return 1;
}
-EXPORT_SYMBOL_GPL(hb_check_local_node_heartbeating);
+EXPORT_SYMBOL_GPL(o2hb_check_local_node_heartbeating);
Modified: trunk/fs/ocfs2/cluster/heartbeat.h
===================================================================
--- trunk/fs/ocfs2/cluster/heartbeat.h 2005-06-01 23:37:15 UTC (rev 2351)
+++ trunk/fs/ocfs2/cluster/heartbeat.h 2005-06-01 23:43:31 UTC (rev 2352)
@@ -29,45 +29,45 @@
#include "ocfs2_heartbeat.h"
-#define HB_CB_MAGIC 0x51d1e4ec
+#define O2HB_CB_MAGIC 0x51d1e4ec
/* callback stuff */
-enum hb_callback_type {
- HB_NODE_DOWN_CB = 0,
- HB_NODE_UP_CB,
- HB_NUM_CB
+enum o2hb_callback_type {
+ O2HB_NODE_DOWN_CB = 0,
+ O2HB_NODE_UP_CB,
+ O2HB_NUM_CB
};
struct nm_node;
-typedef void (hb_cb_func)(struct nm_node *, int, void *);
+typedef void (o2hb_cb_func)(struct nm_node *, int, void *);
-struct hb_callback_func {
+struct o2hb_callback_func {
u32 hc_magic;
struct list_head hc_item;
- hb_cb_func *hc_func;
+ o2hb_cb_func *hc_func;
void *hc_data;
int hc_priority;
- enum hb_callback_type hc_type;
+ enum o2hb_callback_type hc_type;
};
/* number of changes to be seen as live */
-#define HB_LIVE_THRESHOLD 2
+#define O2HB_LIVE_THRESHOLD 2
/* number of missed changes to be seen as dead */
-#define HB_DEAD_THRESHOLD 30
+#define O2HB_DEAD_THRESHOLD 30
-struct config_group *hb_alloc_hb_set(void);
-void hb_free_hb_set(struct config_group *group);
+struct config_group *o2hb_alloc_hb_set(void);
+void o2hb_free_hb_set(struct config_group *group);
-void hb_setup_callback(struct hb_callback_func *hc,
- enum hb_callback_type type,
- hb_cb_func *func,
- void *data,
- int priority);
-int hb_register_callback(struct hb_callback_func *hc);
-int hb_unregister_callback(struct hb_callback_func *hc);
-void hb_fill_node_map(unsigned long *map,
- unsigned bytes);
-void hb_init(void);
-int hb_check_local_node_heartbeating(void);
+void o2hb_setup_callback(struct o2hb_callback_func *hc,
+ enum o2hb_callback_type type,
+ o2hb_cb_func *func,
+ void *data,
+ int priority);
+int o2hb_register_callback(struct o2hb_callback_func *hc);
+int o2hb_unregister_callback(struct o2hb_callback_func *hc);
+void o2hb_fill_node_map(unsigned long *map,
+ unsigned bytes);
+void o2hb_init(void);
+int o2hb_check_local_node_heartbeating(void);
#endif /* CLUSTER_HEARTBEAT_H */
Modified: trunk/fs/ocfs2/cluster/nodemanager.c
===================================================================
--- trunk/fs/ocfs2/cluster/nodemanager.c 2005-06-01 23:37:15 UTC (rev 2351)
+++ trunk/fs/ocfs2/cluster/nodemanager.c 2005-06-01 23:43:31 UTC (rev 2352)
@@ -653,7 +653,7 @@
cluster = kcalloc(1, sizeof(struct nm_cluster), GFP_KERNEL);
ns = kcalloc(1, sizeof(struct nm_node_group), GFP_KERNEL);
defs = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL);
- hb_group = hb_alloc_hb_set();
+ hb_group = o2hb_alloc_hb_set();
if (cluster == NULL || ns == NULL || hb_group == NULL || defs == NULL)
goto out;
@@ -674,7 +674,7 @@
if (ret == NULL) {
kfree(cluster);
kfree(ns);
- hb_free_hb_set(hb_group);
+ o2hb_free_hb_set(hb_group);
kfree(defs);
}
@@ -789,7 +789,7 @@
cluster_print_version();
- hb_init();
+ o2hb_init();
ocfs2_table_header = register_sysctl_table(ocfs2_root_table, 0);
if (!ocfs2_table_header) {
Modified: trunk/fs/ocfs2/cluster/ocfs2_heartbeat.h
===================================================================
--- trunk/fs/ocfs2/cluster/ocfs2_heartbeat.h 2005-06-01 23:37:15 UTC (rev 2351)
+++ trunk/fs/ocfs2/cluster/ocfs2_heartbeat.h 2005-06-01 23:43:31 UTC (rev 2352)
@@ -26,7 +26,7 @@
#ifndef _OCFS2_HEARTBEAT_H
#define _OCFS2_HEARTBEAT_H
-struct hb_disk_heartbeat_block {
+struct o2hb_disk_heartbeat_block {
__u64 hb_seq;
__u8 hb_node;
__u8 hb_pad1[3];
Modified: trunk/fs/ocfs2/cluster/tcp.c
===================================================================
--- trunk/fs/ocfs2/cluster/tcp.c 2005-06-01 23:37:15 UTC (rev 2351)
+++ trunk/fs/ocfs2/cluster/tcp.c 2005-06-01 23:43:31 UTC (rev 2352)
@@ -1142,7 +1142,7 @@
o2net_detach_sc(NULL, node);
}
-static struct hb_callback_func *o2net_hb_down = NULL;
+static struct o2hb_callback_func *o2net_hb_down = NULL;
#define O2NET_HB_NODE_DOWN_PRI (0x1)
int o2net_register_hb_callbacks(void)
@@ -1152,9 +1152,10 @@
return -ENOMEM;
memset(o2net_hb_down, 0, sizeof(*o2net_hb_down));
- hb_setup_callback(o2net_hb_down, HB_NODE_DOWN_CB,
- o2net_hb_node_down_cb, NULL, O2NET_HB_NODE_DOWN_PRI);
- return hb_register_callback(o2net_hb_down);
+ o2hb_setup_callback(o2net_hb_down, O2HB_NODE_DOWN_CB,
+ o2net_hb_node_down_cb, NULL,
+ O2NET_HB_NODE_DOWN_PRI);
+ return o2hb_register_callback(o2net_hb_down);
}
void o2net_unregister_hb_callbacks(void)
@@ -1162,7 +1163,7 @@
int status;
if (o2net_hb_down) {
- status = hb_unregister_callback(o2net_hb_down);
+ status = o2hb_unregister_callback(o2net_hb_down);
if (status < 0)
mlog(ML_ERROR, "Status return %d unregistering "
"heartbeat callback!\n", status);
Modified: trunk/fs/ocfs2/dlm/dlmcommon.h
===================================================================
--- trunk/fs/ocfs2/dlm/dlmcommon.h 2005-06-01 23:37:15 UTC (rev 2351)
+++ trunk/fs/ocfs2/dlm/dlmcommon.h 2005-06-01 23:43:31 UTC (rev 2352)
@@ -119,8 +119,8 @@
dlm_ctxt_state dlm_state;
unsigned int num_joins;
- struct hb_callback_func dlm_hb_up;
- struct hb_callback_func dlm_hb_down;
+ struct o2hb_callback_func dlm_hb_up;
+ struct o2hb_callback_func dlm_hb_down;
struct task_struct *dlm_thread_task;
struct task_struct *dlm_reco_thread_task;
wait_queue_head_t dlm_thread_wq;
@@ -316,8 +316,8 @@
u8 new_master;
u8 error;
enum dlm_mle_type type; // BLOCK or MASTER
- struct hb_callback_func mle_hb_up;
- struct hb_callback_func mle_hb_down;
+ struct o2hb_callback_func mle_hb_up;
+ struct o2hb_callback_func mle_hb_down;
union {
dlm_lock_resource *res;
dlm_lock_name name;
Modified: trunk/fs/ocfs2/dlm/dlmdomain.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmdomain.c 2005-06-01 23:37:15 UTC (rev 2351)
+++ trunk/fs/ocfs2/dlm/dlmdomain.c 2005-06-01 23:43:31 UTC (rev 2352)
@@ -293,8 +293,8 @@
static void dlm_complete_dlm_shutdown(dlm_ctxt *dlm)
{
- hb_unregister_callback(&dlm->dlm_hb_up);
- hb_unregister_callback(&dlm->dlm_hb_down);
+ o2hb_unregister_callback(&dlm->dlm_hb_up);
+ o2hb_unregister_callback(&dlm->dlm_hb_down);
dlm_unregister_domain_handlers(dlm);
@@ -878,7 +878,7 @@
/* group sem locking should work for us here -- we're already
* registered for heartbeat events so filling this should be
* atomic wrt getting those handlers called. */
- hb_fill_node_map(dlm->live_nodes_map, sizeof(dlm->live_nodes_map));
+ o2hb_fill_node_map(dlm->live_nodes_map, sizeof(dlm->live_nodes_map));
spin_lock(&dlm->spinlock);
memcpy(ctxt->live_map, dlm->live_nodes_map, sizeof(ctxt->live_map));
@@ -960,15 +960,15 @@
mlog(0, "registering handlers.\n");
- hb_setup_callback(&dlm->dlm_hb_down, HB_NODE_DOWN_CB,
- dlm_hb_node_down_cb, dlm, DLM_HB_NODE_DOWN_PRI);
- status = hb_register_callback(&dlm->dlm_hb_down);
+ o2hb_setup_callback(&dlm->dlm_hb_down, O2HB_NODE_DOWN_CB,
+ dlm_hb_node_down_cb, dlm, DLM_HB_NODE_DOWN_PRI);
+ status = o2hb_register_callback(&dlm->dlm_hb_down);
if (status)
goto bail;
- hb_setup_callback(&dlm->dlm_hb_up, HB_NODE_UP_CB, dlm_hb_node_up_cb,
- dlm, DLM_HB_NODE_UP_PRI);
- status = hb_register_callback(&dlm->dlm_hb_up);
+ o2hb_setup_callback(&dlm->dlm_hb_up, O2HB_NODE_UP_CB,
+ dlm_hb_node_up_cb, dlm, DLM_HB_NODE_UP_PRI);
+ status = o2hb_register_callback(&dlm->dlm_hb_up);
if (status)
goto bail;
@@ -1247,7 +1247,7 @@
goto leave;
}
- if (!hb_check_local_node_heartbeating()) {
+ if (!o2hb_check_local_node_heartbeating()) {
mlog(ML_ERROR, "the local node has not been configured, or is "
"not heartbeating\n");
goto leave;
Modified: trunk/fs/ocfs2/heartbeat.c
===================================================================
--- trunk/fs/ocfs2/heartbeat.c 2005-06-01 23:37:15 UTC (rev 2351)
+++ trunk/fs/ocfs2/heartbeat.c 2005-06-01 23:43:31 UTC (rev 2352)
@@ -104,11 +104,11 @@
void ocfs2_setup_hb_callbacks(ocfs_super *osb)
{
- hb_setup_callback(&osb->osb_hb_down, HB_NODE_DOWN_CB,
- ocfs2_hb_node_down_cb, osb, OCFS2_HB_NODE_DOWN_PRI);
+ o2hb_setup_callback(&osb->osb_hb_down, O2HB_NODE_DOWN_CB,
+ ocfs2_hb_node_down_cb, osb, OCFS2_HB_NODE_DOWN_PRI);
- hb_setup_callback(&osb->osb_hb_up, HB_NODE_UP_CB, ocfs2_hb_node_up_cb,
- osb, OCFS2_HB_NODE_UP_PRI);
+ o2hb_setup_callback(&osb->osb_hb_up, O2HB_NODE_UP_CB,
+ ocfs2_hb_node_up_cb, osb, OCFS2_HB_NODE_UP_PRI);
}
/* Most functions here are just stubs for now... */
@@ -116,13 +116,13 @@
{
int status;
- status = hb_register_callback(&osb->osb_hb_down);
+ status = o2hb_register_callback(&osb->osb_hb_down);
if (status < 0) {
mlog_errno(status);
goto bail;
}
- status = hb_register_callback(&osb->osb_hb_up);
+ status = o2hb_register_callback(&osb->osb_hb_up);
if (status < 0)
mlog_errno(status);
@@ -134,11 +134,11 @@
{
int status;
- status = hb_unregister_callback(&osb->osb_hb_down);
+ status = o2hb_unregister_callback(&osb->osb_hb_down);
if (status < 0)
mlog_errno(status);
- status = hb_unregister_callback(&osb->osb_hb_up);
+ status = o2hb_unregister_callback(&osb->osb_hb_up);
if (status < 0)
mlog_errno(status);
Modified: trunk/fs/ocfs2/ocfs.h
===================================================================
--- trunk/fs/ocfs2/ocfs.h 2005-06-01 23:37:15 UTC (rev 2351)
+++ trunk/fs/ocfs2/ocfs.h 2005-06-01 23:43:31 UTC (rev 2352)
@@ -318,8 +318,8 @@
unsigned int net_response_ids;
struct list_head net_response_list;
- struct hb_callback_func osb_hb_up;
- struct hb_callback_func osb_hb_down;
+ struct o2hb_callback_func osb_hb_up;
+ struct o2hb_callback_func osb_hb_down;
struct list_head osb_net_handlers;
Modified: trunk/fs/ocfs2/super.c
===================================================================
--- trunk/fs/ocfs2/super.c 2005-06-01 23:37:15 UTC (rev 2351)
+++ trunk/fs/ocfs2/super.c 2005-06-01 23:43:31 UTC (rev 2352)
@@ -343,7 +343,7 @@
/* for now we only have one cluster/node, make sure we see it
* in the heartbeat universe */
- if (!hb_check_local_node_heartbeating()) {
+ if (!o2hb_check_local_node_heartbeating()) {
status = -EINVAL;
goto read_super_error;
}
More information about the Ocfs2-commits
mailing list