[Ocfs2-commits] manish commits r2353 - in trunk/fs/ocfs2: . cluster
dlm
svn-commits at oss.oracle.com
svn-commits at oss.oracle.com
Wed Jun 1 19:59:26 CDT 2005
Author: manish
Signed-off-by: mfasheh
Date: 2005-06-01 19:59:25 -0500 (Wed, 01 Jun 2005)
New Revision: 2353
Modified:
trunk/fs/ocfs2/cluster/heartbeat.c
trunk/fs/ocfs2/cluster/heartbeat.h
trunk/fs/ocfs2/cluster/nodemanager.c
trunk/fs/ocfs2/cluster/nodemanager.h
trunk/fs/ocfs2/cluster/ocfs2_nodemanager.h
trunk/fs/ocfs2/cluster/tcp.c
trunk/fs/ocfs2/cluster/tcp.h
trunk/fs/ocfs2/cluster/tcp_internal.h
trunk/fs/ocfs2/dlm/dlmcommon.h
trunk/fs/ocfs2/dlm/dlmdebug.c
trunk/fs/ocfs2/dlm/dlmdomain.c
trunk/fs/ocfs2/dlm/dlmfs.c
trunk/fs/ocfs2/dlm/dlmmaster.c
trunk/fs/ocfs2/dlm/dlmrecovery.c
trunk/fs/ocfs2/dlm/dlmthread.c
trunk/fs/ocfs2/heartbeat.c
trunk/fs/ocfs2/journal.c
trunk/fs/ocfs2/proc.c
trunk/fs/ocfs2/slot_map.c
trunk/fs/ocfs2/super.c
trunk/fs/ocfs2/vote.c
Log:
Move nm functions into our own namespace
Signed-off-by: mfasheh
Modified: trunk/fs/ocfs2/cluster/heartbeat.c
===================================================================
--- trunk/fs/ocfs2/cluster/heartbeat.c 2005-06-01 23:43:31 UTC (rev 2352)
+++ trunk/fs/ocfs2/cluster/heartbeat.c 2005-06-02 00:59:25 UTC (rev 2353)
@@ -49,8 +49,8 @@
* whenever any of the threads sees activity from the node in its region.
*/
static spinlock_t o2hb_live_lock = SPIN_LOCK_UNLOCKED;
-static struct list_head o2hb_live_slots[NM_MAX_NODES];
-static unsigned long o2hb_live_node_bitmap[BITS_TO_LONGS(NM_MAX_NODES)];
+static struct list_head o2hb_live_slots[O2NM_MAX_NODES];
+static unsigned long o2hb_live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
static LIST_HEAD(o2hb_node_events);
@@ -69,7 +69,7 @@
struct o2hb_node_event {
struct list_head hn_item;
enum o2hb_callback_type hn_event_type;
- struct nm_node *hn_node;
+ struct o2nm_node *hn_node;
int hn_node_num;
};
@@ -343,7 +343,7 @@
o2hb_bio_wait_init(write_wc, 1);
- slot = nm_this_node();
+ slot = o2nm_this_node();
bio = o2hb_setup_one_bio(reg, write_wc, slot, 1);
if (IS_ERR(bio)) {
@@ -369,7 +369,7 @@
struct o2hb_disk_slot *slot;
struct o2hb_disk_heartbeat_block *hb_block;
- node_num = nm_this_node();
+ node_num = o2nm_this_node();
ret = 1;
slot = &reg->hr_slots[node_num];
@@ -391,7 +391,7 @@
struct o2hb_disk_slot *slot;
struct o2hb_disk_heartbeat_block *hb_block;
- node_num = nm_this_node();
+ node_num = o2nm_this_node();
slot = &reg->hr_slots[node_num];
hb_block = (struct o2hb_disk_heartbeat_block *)slot->ds_raw_block;
@@ -405,7 +405,7 @@
}
static void o2hb_fire_callbacks(struct o2hb_callback *hbcall,
- struct nm_node *node,
+ struct o2nm_node *node,
int idx)
{
struct list_head *iter;
@@ -466,7 +466,7 @@
static void o2hb_queue_node_event(struct o2hb_node_event *event,
enum o2hb_callback_type type,
- struct nm_node *node,
+ struct o2nm_node *node,
int node_num)
{
assert_spin_locked(&o2hb_live_lock);
@@ -485,9 +485,9 @@
{
struct o2hb_node_event event =
{ .hn_item = LIST_HEAD_INIT(event.hn_item), };
- struct nm_node *node;
+ struct o2nm_node *node;
- node = nm_get_node_by_num(slot->ds_node_num);
+ node = o2nm_get_node_by_num(slot->ds_node_num);
if (!node)
return;
@@ -509,7 +509,7 @@
o2hb_run_event_list(&event);
- nm_node_put(node);
+ o2nm_node_put(node);
}
static int o2hb_check_slot(struct o2hb_disk_slot *slot)
@@ -517,13 +517,13 @@
int changed = 0;
struct o2hb_node_event event =
{ .hn_item = LIST_HEAD_INIT(event.hn_item), };
- struct nm_node *node;
+ struct o2nm_node *node;
struct o2hb_disk_heartbeat_block *hb_block = slot->ds_raw_block;
u64 cputime;
/* Is this correct? Do we assume that the node doesn't exist
* if we're not configured for him? */
- node = nm_get_node_by_num(slot->ds_node_num);
+ node = o2nm_get_node_by_num(slot->ds_node_num);
if (!node)
return 0;
@@ -595,7 +595,7 @@
o2hb_run_event_list(&event);
- nm_node_put(node);
+ o2nm_node_put(node);
return changed;
}
@@ -621,15 +621,15 @@
static void o2hb_do_disk_heartbeat(struct o2hb_region *reg)
{
int i, ret, highest_node, change = 0;
- unsigned long configured_nodes[BITS_TO_LONGS(NM_MAX_NODES)];
+ unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)];
struct bio *write_bio;
struct o2hb_bio_wait_ctxt write_wc;
- if (nm_configured_node_map(configured_nodes, sizeof(configured_nodes)))
+ if (o2nm_configured_node_map(configured_nodes, sizeof(configured_nodes)))
return;
- highest_node = o2hb_highest_node(configured_nodes, NM_MAX_NODES);
- if (highest_node >= NM_MAX_NODES) {
+ highest_node = o2hb_highest_node(configured_nodes, O2NM_MAX_NODES);
+ if (highest_node >= O2NM_MAX_NODES) {
mlog(ML_NOTICE, "ocfs2_heartbeat: no configured nodes found!\n");
return;
}
@@ -663,7 +663,7 @@
}
i = -1;
- while((i = find_next_bit(configured_nodes, NM_MAX_NODES, i + 1)) < NM_MAX_NODES) {
+ while((i = find_next_bit(configured_nodes, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
change |= o2hb_check_slot(&reg->hr_slots[i]);
}
@@ -727,7 +727,7 @@
*/
void o2hb_fill_node_map(unsigned long *map, unsigned bytes)
{
- BUG_ON(bytes < (BITS_TO_LONGS(NM_MAX_NODES) * sizeof(unsigned long)));
+ BUG_ON(bytes < (BITS_TO_LONGS(O2NM_MAX_NODES) * sizeof(unsigned long)));
/* callers want to serialize this map and callbacks so that they
* can trust that they don't miss nodes coming to the party */
@@ -875,7 +875,7 @@
if (!p || (*p && (*p != '\n')))
return -EINVAL;
- if (tmp > NM_MAX_NODES || tmp == 0)
+ if (tmp > O2NM_MAX_NODES || tmp == 0)
return -ERANGE;
reg->hr_blocks = (unsigned int)tmp;
@@ -1312,12 +1312,12 @@
* heartbeating. */
int o2hb_check_local_node_heartbeating(void)
{
- unsigned long testing_map[BITS_TO_LONGS(NM_MAX_NODES)];
+ unsigned long testing_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
u8 node_num;
/* if this node was set then we have networking */
- node_num = nm_this_node();
- if (node_num == NM_MAX_NODES) {
+ node_num = o2nm_this_node();
+ if (node_num == O2NM_MAX_NODES) {
mlog(ML_HEARTBEAT, "this node has not been configured.\n");
return 0;
}
Modified: trunk/fs/ocfs2/cluster/heartbeat.h
===================================================================
--- trunk/fs/ocfs2/cluster/heartbeat.h 2005-06-01 23:43:31 UTC (rev 2352)
+++ trunk/fs/ocfs2/cluster/heartbeat.h 2005-06-02 00:59:25 UTC (rev 2353)
@@ -38,8 +38,8 @@
O2HB_NUM_CB
};
-struct nm_node;
-typedef void (o2hb_cb_func)(struct nm_node *, int, void *);
+struct o2nm_node;
+typedef void (o2hb_cb_func)(struct o2nm_node *, int, void *);
struct o2hb_callback_func {
u32 hc_magic;
Modified: trunk/fs/ocfs2/cluster/nodemanager.c
===================================================================
--- trunk/fs/ocfs2/cluster/nodemanager.c 2005-06-01 23:43:31 UTC (rev 2352)
+++ trunk/fs/ocfs2/cluster/nodemanager.c 2005-06-02 00:59:25 UTC (rev 2353)
@@ -34,7 +34,7 @@
/* for now we operate under the assertion that there can be only one
* cluster active at a time. Changing this will require trickling
* cluster references throughout where nodes are looked up */
-static struct nm_cluster *nm_single_cluster = NULL;
+static struct o2nm_cluster *o2nm_single_cluster = NULL;
#define OCFS2_MAX_HB_CTL_PATH 256
static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl";
@@ -90,43 +90,43 @@
static struct ctl_table_header *ocfs2_table_header = NULL;
-const char *nm_get_hb_ctl_path(void)
+const char *o2nm_get_hb_ctl_path(void)
{
return ocfs2_hb_ctl_path;
}
-EXPORT_SYMBOL_GPL(nm_get_hb_ctl_path);
+EXPORT_SYMBOL_GPL(o2nm_get_hb_ctl_path);
-struct nm_cluster {
+struct o2nm_cluster {
struct config_group cl_group;
unsigned cl_has_local:1;
u8 cl_local_node;
rwlock_t cl_nodes_lock;
- struct nm_node *cl_nodes[NM_MAX_NODES];
+ struct o2nm_node *cl_nodes[O2NM_MAX_NODES];
struct rb_root cl_node_ip_tree;
/* this bitmap is part of a hack for disk bitmap.. will go eventually. - zab */
- unsigned long cl_nodes_bitmap[BITS_TO_LONGS(NM_MAX_NODES)];
+ unsigned long cl_nodes_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
};
-struct nm_node * nm_get_node_by_num(u8 node_num)
+struct o2nm_node *o2nm_get_node_by_num(u8 node_num)
{
- struct nm_node *node = NULL;
+ struct o2nm_node *node = NULL;
- if (node_num >= NM_MAX_NODES || nm_single_cluster == NULL)
+ if (node_num >= O2NM_MAX_NODES || o2nm_single_cluster == NULL)
goto out;
- read_lock(&nm_single_cluster->cl_nodes_lock);
- node = nm_single_cluster->cl_nodes[node_num];
+ read_lock(&o2nm_single_cluster->cl_nodes_lock);
+ node = o2nm_single_cluster->cl_nodes[node_num];
if (node)
config_item_get(&node->nd_item);
- read_unlock(&nm_single_cluster->cl_nodes_lock);
+ read_unlock(&o2nm_single_cluster->cl_nodes_lock);
out:
return node;
}
-EXPORT_SYMBOL_GPL(nm_get_node_by_num);
+EXPORT_SYMBOL_GPL(o2nm_get_node_by_num);
-int nm_configured_node_map(unsigned long *map, unsigned bytes)
+int o2nm_configured_node_map(unsigned long *map, unsigned bytes)
{
- struct nm_cluster *cluster = nm_single_cluster;
+ struct o2nm_cluster *cluster = o2nm_single_cluster;
BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap)));
@@ -139,49 +139,49 @@
return 0;
}
-EXPORT_SYMBOL_GPL(nm_configured_node_map);
+EXPORT_SYMBOL_GPL(o2nm_configured_node_map);
-static struct nm_node * nm_node_ip_tree_lookup(struct nm_cluster *cluster,
- u32 ip_needle,
- struct rb_node ***ret_p,
- struct rb_node **ret_parent)
+static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster,
+ u32 ip_needle,
+ struct rb_node ***ret_p,
+ struct rb_node **ret_parent)
{
- struct rb_node **p = &cluster->cl_node_ip_tree.rb_node;
- struct rb_node *parent = NULL;
- struct nm_node *node, *ret = NULL;
+ struct rb_node **p = &cluster->cl_node_ip_tree.rb_node;
+ struct rb_node *parent = NULL;
+ struct o2nm_node *node, *ret = NULL;
- while (*p) {
- parent = *p;
- node = rb_entry(parent, struct nm_node, nd_ip_node);
+ while (*p) {
+ parent = *p;
+ node = rb_entry(parent, struct o2nm_node, nd_ip_node);
- if (ip_needle < node->nd_ipv4_address)
- p = &(*p)->rb_left;
- else if (ip_needle > node->nd_ipv4_address)
- p = &(*p)->rb_right;
- else {
+ if (ip_needle < node->nd_ipv4_address)
+ p = &(*p)->rb_left;
+ else if (ip_needle > node->nd_ipv4_address)
+ p = &(*p)->rb_right;
+ else {
ret = node;
- break;
+ break;
}
- }
-
- if (ret_p != NULL)
- *ret_p = p;
- if (ret_parent != NULL)
- *ret_parent = parent;
+ }
- return ret;
+ if (ret_p != NULL)
+ *ret_p = p;
+ if (ret_parent != NULL)
+ *ret_parent = parent;
+
+ return ret;
}
-struct nm_node * nm_get_node_by_ip(u32 addr)
+struct o2nm_node *o2nm_get_node_by_ip(u32 addr)
{
- struct nm_node *node = NULL;
- struct nm_cluster *cluster = nm_single_cluster;
+ struct o2nm_node *node = NULL;
+ struct o2nm_cluster *cluster = o2nm_single_cluster;
if (cluster == NULL)
goto out;
read_lock(&cluster->cl_nodes_lock);
- node = nm_node_ip_tree_lookup(cluster, addr, NULL, NULL);
+ node = o2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL);
if (node)
config_item_get(&node->nd_item);
read_unlock(&cluster->cl_nodes_lock);
@@ -189,75 +189,75 @@
out:
return node;
}
-EXPORT_SYMBOL_GPL(nm_get_node_by_ip);
+EXPORT_SYMBOL_GPL(o2nm_get_node_by_ip);
-void nm_node_put(struct nm_node *node)
+void o2nm_node_put(struct o2nm_node *node)
{
config_item_put(&node->nd_item);
}
-EXPORT_SYMBOL_GPL(nm_node_put);
+EXPORT_SYMBOL_GPL(o2nm_node_put);
-void nm_node_get(struct nm_node *node)
+void o2nm_node_get(struct o2nm_node *node)
{
config_item_get(&node->nd_item);
}
-EXPORT_SYMBOL_GPL(nm_node_get);
+EXPORT_SYMBOL_GPL(o2nm_node_get);
-u8 nm_this_node(void)
+u8 o2nm_this_node(void)
{
- u8 node_num = NM_MAX_NODES;
+ u8 node_num = O2NM_MAX_NODES;
- if (nm_single_cluster && nm_single_cluster->cl_has_local)
- node_num = nm_single_cluster->cl_local_node;
+ if (o2nm_single_cluster && o2nm_single_cluster->cl_has_local)
+ node_num = o2nm_single_cluster->cl_local_node;
return node_num;
}
-EXPORT_SYMBOL_GPL(nm_this_node);
+EXPORT_SYMBOL_GPL(o2nm_this_node);
/* node configfs bits */
-static struct nm_cluster *to_nm_cluster(struct config_item *item)
+static struct o2nm_cluster *to_o2nm_cluster(struct config_item *item)
{
return item ?
- container_of(to_config_group(item), struct nm_cluster,
+ container_of(to_config_group(item), struct o2nm_cluster,
cl_group)
: NULL;
}
-static struct nm_node *to_nm_node(struct config_item *item)
+static struct o2nm_node *to_o2nm_node(struct config_item *item)
{
- return item ? container_of(item, struct nm_node, nd_item) : NULL;
+ return item ? container_of(item, struct o2nm_node, nd_item) : NULL;
}
-static void nm_node_release(struct config_item *item)
+static void o2nm_node_release(struct config_item *item)
{
- struct nm_node *node = to_nm_node(item);
+ struct o2nm_node *node = to_o2nm_node(item);
kfree(node);
}
-static ssize_t nm_node_num_read(struct nm_node *node, char *page)
+static ssize_t o2nm_node_num_read(struct o2nm_node *node, char *page)
{
return sprintf(page, "%d\n", node->nd_num);
}
-static struct nm_cluster *to_nm_cluster_from_node(struct nm_node *node)
+static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node)
{
/* through the first node_set .parent
- * mycluster/nodes/mynode == nm_cluster->nm_node_group->nm_node */
- return to_nm_cluster(node->nd_item.ci_parent->ci_parent);
+ * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */
+ return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
}
enum {
- NM_NODE_ATTR_NUM = 0,
- NM_NODE_ATTR_PORT,
- NM_NODE_ATTR_ADDRESS,
- NM_NODE_ATTR_LOCAL,
+ O2NM_NODE_ATTR_NUM = 0,
+ O2NM_NODE_ATTR_PORT,
+ O2NM_NODE_ATTR_ADDRESS,
+ O2NM_NODE_ATTR_LOCAL,
};
-static ssize_t nm_node_num_write(struct nm_node *node, const char *page,
- size_t count)
+static ssize_t o2nm_node_num_write(struct o2nm_node *node, const char *page,
+ size_t count)
{
- struct nm_cluster *cluster = to_nm_cluster_from_node(node);
+ struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
unsigned long tmp;
char *p = (char *)page;
@@ -265,15 +265,15 @@
if (!p || (*p && (*p != '\n')))
return -EINVAL;
- if (tmp >= NM_MAX_NODES)
+ if (tmp >= O2NM_MAX_NODES)
return -ERANGE;
/* once we're in the cl_nodes tree networking can look us up by
* node number and try to use our address and port attributes
* to connect to this node.. make sure that they've been set
* before writing the node attribute? */
- if (!test_bit(NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
- !test_bit(NM_NODE_ATTR_PORT, &node->nd_set_attributes))
+ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
+ !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
return -EINVAL; /* XXX */
write_lock(&cluster->cl_nodes_lock);
@@ -290,13 +290,13 @@
return count;
}
-static ssize_t nm_node_ipv4_port_read(struct nm_node *node, char *page)
+static ssize_t o2nm_node_ipv4_port_read(struct o2nm_node *node, char *page)
{
return sprintf(page, "%u\n", ntohs(node->nd_ipv4_port));
}
-static ssize_t nm_node_ipv4_port_write(struct nm_node *node, const char *page,
- size_t count)
+static ssize_t o2nm_node_ipv4_port_write(struct o2nm_node *node,
+ const char *page, size_t count)
{
unsigned long tmp;
char *p = (char *)page;
@@ -315,16 +315,16 @@
return count;
}
-static ssize_t nm_node_ipv4_address_read(struct nm_node *node, char *page)
+static ssize_t o2nm_node_ipv4_address_read(struct o2nm_node *node, char *page)
{
return sprintf(page, "%u.%u.%u.%u\n", NIPQUAD(node->nd_ipv4_address));
}
-static ssize_t nm_node_ipv4_address_write(struct nm_node *node,
- const char *page,
- size_t count)
+static ssize_t o2nm_node_ipv4_address_write(struct o2nm_node *node,
+ const char *page,
+ size_t count)
{
- struct nm_cluster *cluster = to_nm_cluster_from_node(node);
+ struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
int ret, i;
struct rb_node **p, *parent;
unsigned int octets[4];
@@ -344,10 +344,10 @@
ret = 0;
write_lock(&cluster->cl_nodes_lock);
- if (nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
+ if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
ret = -EEXIST;
else {
- rb_link_node(&node->nd_ip_node, parent, p);
+ rb_link_node(&node->nd_ip_node, parent, p);
rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
}
write_unlock(&cluster->cl_nodes_lock);
@@ -358,16 +358,16 @@
return count;
}
-static ssize_t nm_node_local_read(struct nm_node *node, char *page)
+
+static ssize_t o2nm_node_local_read(struct o2nm_node *node, char *page)
{
return sprintf(page, "%d\n", node->nd_local);
}
-static ssize_t nm_node_local_write(struct nm_node *node,
- const char *page,
- size_t count)
+static ssize_t o2nm_node_local_write(struct o2nm_node *node, const char *page,
+ size_t count)
{
- struct nm_cluster *cluster = to_nm_cluster_from_node(node);
+ struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
unsigned long tmp;
char *p = (char *)page;
ssize_t ret;
@@ -380,9 +380,9 @@
/* setting local turns on networking rx for now so we require having
* set everything else first */
- if (!test_bit(NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
- !test_bit(NM_NODE_ATTR_NUM, &node->nd_set_attributes) ||
- !test_bit(NM_NODE_ATTR_PORT, &node->nd_set_attributes))
+ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
+ !test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) ||
+ !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
return -EINVAL; /* XXX */
/* the only failure case is trying to set a new local node
@@ -414,76 +414,88 @@
return count;
}
-struct nm_node_attribute {
+struct o2nm_node_attribute {
struct configfs_attribute attr;
- ssize_t (*show)(struct nm_node *, char *);
- ssize_t (*store)(struct nm_node *, const char *, size_t);
+ ssize_t (*show)(struct o2nm_node *, char *);
+ ssize_t (*store)(struct o2nm_node *, const char *, size_t);
};
-static struct nm_node_attribute nm_node_attr_num = {
- .attr = { .ca_owner = THIS_MODULE, .ca_name = "num", .ca_mode = S_IRUGO | S_IWUSR },
- .show = nm_node_num_read,
- .store = nm_node_num_write,
+static struct o2nm_node_attribute o2nm_node_attr_num = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "num",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = o2nm_node_num_read,
+ .store = o2nm_node_num_write,
};
-static struct nm_node_attribute nm_node_attr_ipv4_port = {
- .attr = { .ca_owner = THIS_MODULE, .ca_name = "ipv4_port", .ca_mode = S_IRUGO | S_IWUSR },
- .show = nm_node_ipv4_port_read,
- .store = nm_node_ipv4_port_write,
+
+static struct o2nm_node_attribute o2nm_node_attr_ipv4_port = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "ipv4_port",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = o2nm_node_ipv4_port_read,
+ .store = o2nm_node_ipv4_port_write,
};
-static struct nm_node_attribute nm_node_attr_ipv4_address = {
- .attr = { .ca_owner = THIS_MODULE, .ca_name = "ipv4_address", .ca_mode = S_IRUGO | S_IWUSR },
- .show = nm_node_ipv4_address_read,
- .store = nm_node_ipv4_address_write,
+
+static struct o2nm_node_attribute o2nm_node_attr_ipv4_address = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "ipv4_address",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = o2nm_node_ipv4_address_read,
+ .store = o2nm_node_ipv4_address_write,
};
-static struct nm_node_attribute nm_node_attr_local = {
- .attr = { .ca_owner = THIS_MODULE, .ca_name = "local", .ca_mode = S_IRUGO | S_IWUSR },
- .show = nm_node_local_read,
- .store = nm_node_local_write,
+
+static struct o2nm_node_attribute o2nm_node_attr_local = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "local",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = o2nm_node_local_read,
+ .store = o2nm_node_local_write,
};
-static struct configfs_attribute *nm_node_attrs[] = {
- [NM_NODE_ATTR_NUM] = &nm_node_attr_num.attr,
- [NM_NODE_ATTR_PORT] = &nm_node_attr_ipv4_port.attr,
- [NM_NODE_ATTR_ADDRESS] = &nm_node_attr_ipv4_address.attr,
- [NM_NODE_ATTR_LOCAL] = &nm_node_attr_local.attr,
+
+static struct configfs_attribute *o2nm_node_attrs[] = {
+ [O2NM_NODE_ATTR_NUM] = &o2nm_node_attr_num.attr,
+ [O2NM_NODE_ATTR_PORT] = &o2nm_node_attr_ipv4_port.attr,
+ [O2NM_NODE_ATTR_ADDRESS] = &o2nm_node_attr_ipv4_address.attr,
+ [O2NM_NODE_ATTR_LOCAL] = &o2nm_node_attr_local.attr,
NULL,
};
-static int nm_attr_index(struct configfs_attribute *attr)
+static int o2nm_attr_index(struct configfs_attribute *attr)
{
int i;
- for (i = 0; i < ARRAY_SIZE(nm_node_attrs); i++) {
- if (attr == nm_node_attrs[i])
+ for (i = 0; i < ARRAY_SIZE(o2nm_node_attrs); i++) {
+ if (attr == o2nm_node_attrs[i])
return i;
}
BUG();
return 0;
}
-static ssize_t nm_node_show(struct config_item *item,
- struct configfs_attribute *attr,
- char *page)
+static ssize_t o2nm_node_show(struct config_item *item,
+ struct configfs_attribute *attr,
+ char *page)
{
- struct nm_node *node = to_nm_node(item);
- struct nm_node_attribute *nm_node_attr =
- container_of(attr, struct nm_node_attribute, attr);
+ struct o2nm_node *node = to_o2nm_node(item);
+ struct o2nm_node_attribute *o2nm_node_attr =
+ container_of(attr, struct o2nm_node_attribute, attr);
ssize_t ret = 0;
- if (nm_node_attr->show)
- ret = nm_node_attr->show(node, page);
+ if (o2nm_node_attr->show)
+ ret = o2nm_node_attr->show(node, page);
return ret;
}
-static ssize_t nm_node_store(struct config_item *item,
- struct configfs_attribute *attr,
- const char *page, size_t count)
+static ssize_t o2nm_node_store(struct config_item *item,
+ struct configfs_attribute *attr,
+ const char *page, size_t count)
{
- struct nm_node *node = to_nm_node(item);
- struct nm_node_attribute *nm_node_attr =
- container_of(attr, struct nm_node_attribute, attr);
+ struct o2nm_node *node = to_o2nm_node(item);
+ struct o2nm_node_attribute *o2nm_node_attr =
+ container_of(attr, struct o2nm_node_attribute, attr);
ssize_t ret;
- int attr_index = nm_attr_index(attr);
+ int attr_index = o2nm_attr_index(attr);
- if (nm_node_attr->store == NULL) {
+ if (o2nm_node_attr->store == NULL) {
ret = -EINVAL;
goto out;
}
@@ -491,7 +503,7 @@
if (test_bit(attr_index, &node->nd_set_attributes))
return -EBUSY;
- ret = nm_node_attr->store(node, page, count);
+ ret = o2nm_node_attr->store(node, page, count);
if (ret < count)
goto out;
@@ -500,49 +512,49 @@
return ret;
}
-static struct configfs_item_operations nm_node_item_ops = {
- .release = nm_node_release,
- .show_attribute = nm_node_show,
- .store_attribute = nm_node_store,
+static struct configfs_item_operations o2nm_node_item_ops = {
+ .release = o2nm_node_release,
+ .show_attribute = o2nm_node_show,
+ .store_attribute = o2nm_node_store,
};
-static struct config_item_type nm_node_type = {
- .ct_item_ops = &nm_node_item_ops,
- .ct_attrs = nm_node_attrs,
+static struct config_item_type o2nm_node_type = {
+ .ct_item_ops = &o2nm_node_item_ops,
+ .ct_attrs = o2nm_node_attrs,
.ct_owner = THIS_MODULE,
};
/* node set */
-struct nm_node_group {
+struct o2nm_node_group {
struct config_group ns_group;
/* some stuff? */
};
#if 0
-static struct nm_node_group *to_nm_node_group(struct config_group *group)
+static struct o2nm_node_group *to_o2nm_node_group(struct config_group *group)
{
return group ?
- container_of(group, struct nm_node_group, ns_group)
+ container_of(group, struct o2nm_node_group, ns_group)
: NULL;
}
#endif
-static struct config_item *nm_node_group_make_item(struct config_group *group,
- const char *name)
+static struct config_item *o2nm_node_group_make_item(struct config_group *group,
+ const char *name)
{
- struct nm_node *node = NULL;
+ struct o2nm_node *node = NULL;
struct config_item *ret = NULL;
- if (strlen(name) > NM_MAX_NAME_LEN)
+ if (strlen(name) > O2NM_MAX_NAME_LEN)
goto out; /* ENAMETOOLONG */
- node = kcalloc(1, sizeof(struct nm_node), GFP_KERNEL);
+ node = kcalloc(1, sizeof(struct o2nm_node), GFP_KERNEL);
if (node == NULL)
goto out; /* ENOMEM */
strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */
- config_item_init_type_name(&node->nd_item, name, &nm_node_type);
+ config_item_init_type_name(&node->nd_item, name, &o2nm_node_type);
spin_lock_init(&node->nd_lock);
atomic_set(&node->nd_pending_connects, 0);
atomic_set(&node->nd_sc_generation, 0);
@@ -559,18 +571,18 @@
return ret;
}
-static void nm_node_group_drop_item(struct config_group *group,
- struct config_item *item)
+static void o2nm_node_group_drop_item(struct config_group *group,
+ struct config_item *item)
{
- struct nm_node *node = to_nm_node(item);
- struct nm_cluster *cluster = to_nm_cluster(group->cg_item.ci_parent);
+ struct o2nm_node *node = to_o2nm_node(item);
+ struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent);
o2net_detach_sc(NULL, node);
if (cluster->cl_has_local &&
(cluster->cl_local_node == node->nd_num)) {
cluster->cl_has_local = 0;
- cluster->cl_local_node = NM_MAX_NODES;
+ cluster->cl_local_node = O2NM_MAX_NODES;
o2net_stop_rx_thread(node);
}
@@ -592,103 +604,105 @@
config_item_put(item);
}
-static struct configfs_group_operations nm_node_group_group_ops = {
- .make_item = nm_node_group_make_item,
- .drop_item = nm_node_group_drop_item,
+static struct configfs_group_operations o2nm_node_group_group_ops = {
+ .make_item = o2nm_node_group_make_item,
+ .drop_item = o2nm_node_group_drop_item,
};
-static struct config_item_type nm_node_group_type = {
- .ct_group_ops = &nm_node_group_group_ops,
+static struct config_item_type o2nm_node_group_type = {
+ .ct_group_ops = &o2nm_node_group_group_ops,
.ct_owner = THIS_MODULE,
};
/* cluster */
-static void nm_cluster_release(struct config_item *item)
+static void o2nm_cluster_release(struct config_item *item)
{
- struct nm_cluster *cluster = to_nm_cluster(item);
+ struct o2nm_cluster *cluster = to_o2nm_cluster(item);
kfree(cluster->cl_group.default_groups);
kfree(cluster);
}
-static struct configfs_item_operations nm_cluster_item_ops = {
- .release = nm_cluster_release,
+static struct configfs_item_operations o2nm_cluster_item_ops = {
+ .release = o2nm_cluster_release,
};
-static struct config_item_type nm_cluster_type = {
- .ct_item_ops = &nm_cluster_item_ops,
+static struct config_item_type o2nm_cluster_type = {
+ .ct_item_ops = &o2nm_cluster_item_ops,
.ct_owner = THIS_MODULE,
};
/* cluster set */
-struct nm_cluster_group {
+struct o2nm_cluster_group {
struct configfs_subsystem cs_subsys;
/* some stuff? */
};
#if 0
-static struct nm_cluster_group *to_nm_cluster_group(struct config_group *group)
+static struct o2nm_cluster_group *to_o2nm_cluster_group(struct config_group *group)
{
return group ?
- container_of(to_configfs_subsystem(group), struct nm_cluster_group, cs_subsys)
+ container_of(to_configfs_subsystem(group), struct o2nm_cluster_group, cs_subsys)
: NULL;
}
#endif
-static struct config_group *nm_cluster_group_make_group(struct config_group *group,
- const char *name)
+static struct config_group *o2nm_cluster_group_make_group(struct config_group *group,
+ const char *name)
{
- struct nm_cluster *cluster = NULL;
- struct nm_node_group *ns = NULL;
- struct config_group *hb_group = NULL, *ret = NULL;
+ struct o2nm_cluster *cluster = NULL;
+ struct o2nm_node_group *ns = NULL;
+ struct config_group *o2hb_group = NULL, *ret = NULL;
void *defs = NULL;
/* this runs under the parent dir's i_sem; there can be only
* one caller in here at a time */
- if (nm_single_cluster)
+ if (o2nm_single_cluster)
goto out; /* ENOSPC */
- cluster = kcalloc(1, sizeof(struct nm_cluster), GFP_KERNEL);
- ns = kcalloc(1, sizeof(struct nm_node_group), GFP_KERNEL);
+ cluster = kcalloc(1, sizeof(struct o2nm_cluster), GFP_KERNEL);
+ ns = kcalloc(1, sizeof(struct o2nm_node_group), GFP_KERNEL);
defs = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL);
- hb_group = o2hb_alloc_hb_set();
- if (cluster == NULL || ns == NULL || hb_group == NULL || defs == NULL)
+ o2hb_group = o2hb_alloc_hb_set();
+ if (cluster == NULL || ns == NULL || o2hb_group == NULL || defs == NULL)
goto out;
- config_group_init_type_name(&cluster->cl_group, name, &nm_cluster_type);
- config_group_init_type_name(&ns->ns_group, "node", &nm_node_group_type);
+ config_group_init_type_name(&cluster->cl_group, name,
+ &o2nm_cluster_type);
+ config_group_init_type_name(&ns->ns_group, "node",
+ &o2nm_node_group_type);
cluster->cl_group.default_groups = defs;
cluster->cl_group.default_groups[0] = &ns->ns_group;
- cluster->cl_group.default_groups[1] = hb_group;
+ cluster->cl_group.default_groups[1] = o2hb_group;
cluster->cl_group.default_groups[2] = NULL;
rwlock_init(&cluster->cl_nodes_lock);
cluster->cl_node_ip_tree = RB_ROOT;
ret = &cluster->cl_group;
- nm_single_cluster = cluster;
+ o2nm_single_cluster = cluster;
out:
if (ret == NULL) {
kfree(cluster);
kfree(ns);
- o2hb_free_hb_set(hb_group);
+ o2hb_free_hb_set(o2hb_group);
kfree(defs);
}
return ret;
}
-static void nm_cluster_group_drop_item(struct config_group *group, struct config_item *item)
+static void o2nm_cluster_group_drop_item(struct config_group *group, struct config_item *item)
{
- struct nm_cluster *cluster = to_nm_cluster(item);
+ struct o2nm_cluster *cluster = to_o2nm_cluster(item);
int i;
struct config_item *killme;
- BUG_ON(nm_single_cluster != cluster);
- nm_single_cluster = NULL;
+ BUG_ON(o2nm_single_cluster != cluster);
+ o2nm_single_cluster = NULL;
for (i = 0; cluster->cl_group.default_groups[i]; i++) {
killme = &cluster->cl_group.default_groups[i]->cg_item;
@@ -699,57 +713,57 @@
config_item_put(item);
}
-static struct configfs_group_operations nm_cluster_group_group_ops = {
- .make_group = nm_cluster_group_make_group,
- .drop_item = nm_cluster_group_drop_item,
+static struct configfs_group_operations o2nm_cluster_group_group_ops = {
+ .make_group = o2nm_cluster_group_make_group,
+ .drop_item = o2nm_cluster_group_drop_item,
};
-static struct config_item_type nm_cluster_group_type = {
- .ct_group_ops = &nm_cluster_group_group_ops,
+static struct config_item_type o2nm_cluster_group_type = {
+ .ct_group_ops = &o2nm_cluster_group_group_ops,
.ct_owner = THIS_MODULE,
};
-static struct nm_cluster_group nm_cluster_group = {
+static struct o2nm_cluster_group o2nm_cluster_group = {
.cs_subsys = {
.su_group = {
.cg_item = {
.ci_namebuf = "cluster",
- .ci_type = &nm_cluster_group_type,
+ .ci_type = &o2nm_cluster_group_type,
},
},
},
};
-#define NM_PROC_PATH "fs/ocfs2_nodemanager"
-static struct proc_dir_entry *nm_proc;
+#define O2NM_PROC_PATH "fs/ocfs2_nodemanager"
+static struct proc_dir_entry *o2nm_proc;
-#define NM_VERSION_PROC_NAME "interface_revision"
+#define O2NM_VERSION_PROC_NAME "interface_revision"
-static void nm_remove_proc(struct proc_dir_entry *parent)
+static void o2nm_remove_proc(struct proc_dir_entry *parent)
{
- remove_proc_entry(NM_VERSION_PROC_NAME, parent);
+ remove_proc_entry(O2NM_VERSION_PROC_NAME, parent);
}
-static void __exit exit_nm(void)
+static void __exit exit_o2nm(void)
{
if (ocfs2_table_header)
unregister_sysctl_table(ocfs2_table_header);
/* XXX sync with hb callbacks and shut down hb? */
o2net_unregister_hb_callbacks();
- configfs_unregister_subsystem(&nm_cluster_group.cs_subsys);
- nm_remove_proc(nm_proc);
- mlog_remove_proc(nm_proc);
- o2net_proc_exit(nm_proc);
- remove_proc_entry(NM_PROC_PATH, NULL);
+ configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys);
+ o2nm_remove_proc(o2nm_proc);
+ mlog_remove_proc(o2nm_proc);
+ o2net_proc_exit(o2nm_proc);
+ remove_proc_entry(O2NM_PROC_PATH, NULL);
}
-static int nm_proc_version(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int o2nm_proc_version(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
{
int len;
- len = sprintf(page, "%u\n", NM_API_VERSION);
+ len = sprintf(page, "%u\n", O2NM_API_VERSION);
if (len < 0)
return len;
@@ -769,21 +783,21 @@
return len;
}
-static int nm_init_proc(struct proc_dir_entry *parent)
+static int o2nm_init_proc(struct proc_dir_entry *parent)
{
struct proc_dir_entry *p;
- p = create_proc_read_entry(NM_VERSION_PROC_NAME,
+ p = create_proc_read_entry(O2NM_VERSION_PROC_NAME,
S_IFREG | S_IRUGO,
parent,
- nm_proc_version,
+ o2nm_proc_version,
NULL);
if (!p)
return -ENOMEM;
return 0;
}
-static int __init init_nm(void)
+static int __init init_o2nm(void)
{
int ret = -1;
@@ -802,38 +816,38 @@
if (ret)
goto out_sysctl;
- config_group_init(&nm_cluster_group.cs_subsys.su_group);
- init_MUTEX(&nm_cluster_group.cs_subsys.su_sem);
- ret = configfs_register_subsystem(&nm_cluster_group.cs_subsys);
+ config_group_init(&o2nm_cluster_group.cs_subsys.su_group);
+ init_MUTEX(&o2nm_cluster_group.cs_subsys.su_sem);
+ ret = configfs_register_subsystem(&o2nm_cluster_group.cs_subsys);
if (ret) {
printk(KERN_ERR "nodemanager: Registration returned %d\n", ret);
goto out_callbacks;
}
- nm_proc = proc_mkdir(NM_PROC_PATH, NULL);
- if (nm_proc == NULL) {
+ o2nm_proc = proc_mkdir(O2NM_PROC_PATH, NULL);
+ if (o2nm_proc == NULL) {
ret = -ENOMEM; /* shrug */
goto out_subsys;
}
- ret = mlog_init_proc(nm_proc);
+ ret = mlog_init_proc(o2nm_proc);
if (ret)
goto out_remove;
- ret = o2net_proc_init(nm_proc);
+ ret = o2net_proc_init(o2nm_proc);
if (ret)
goto out_mlog;
- ret = nm_init_proc(nm_proc);
+ ret = o2nm_init_proc(o2nm_proc);
if (ret == 0)
goto out;
out_mlog:
- mlog_remove_proc(nm_proc);
+ mlog_remove_proc(o2nm_proc);
out_remove:
- remove_proc_entry(NM_PROC_PATH, NULL);
+ remove_proc_entry(O2NM_PROC_PATH, NULL);
out_subsys:
- configfs_unregister_subsystem(&nm_cluster_group.cs_subsys);
+ configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys);
out_callbacks:
o2net_unregister_hb_callbacks();
out_sysctl:
@@ -845,5 +859,5 @@
MODULE_AUTHOR("Oracle");
MODULE_LICENSE("GPL");
-module_init(init_nm)
-module_exit(exit_nm)
+module_init(init_o2nm)
+module_exit(exit_o2nm)
Modified: trunk/fs/ocfs2/cluster/nodemanager.h
===================================================================
--- trunk/fs/ocfs2/cluster/nodemanager.h 2005-06-01 23:43:31 UTC (rev 2352)
+++ trunk/fs/ocfs2/cluster/nodemanager.h 2005-06-02 00:59:25 UTC (rev 2353)
@@ -36,12 +36,12 @@
#define KERN_OCFS2 988
#define KERN_OCFS2_NM 1
-const char *nm_get_hb_ctl_path(void);
+const char *o2nm_get_hb_ctl_path(void);
-struct nm_node {
+struct o2nm_node {
spinlock_t nd_lock;
struct config_item nd_item;
- char nd_name[NM_MAX_NAME_LEN+1]; /* replace? */
+ char nd_name[O2NM_MAX_NAME_LEN+1]; /* replace? */
__u8 nd_num;
/* only one address per node, as attributes, for now. both
* in network order */
@@ -69,12 +69,12 @@
struct list_head nd_status_list;
};
-u8 nm_this_node(void);
+u8 o2nm_this_node(void);
-int nm_configured_node_map(unsigned long *map, unsigned bytes);
-struct nm_node * nm_get_node_by_num(u8 node_num);
-struct nm_node * nm_get_node_by_ip(u32 addr);
-void nm_node_get(struct nm_node *node);
-void nm_node_put(struct nm_node *node);
+int o2nm_configured_node_map(unsigned long *map, unsigned bytes);
+struct o2nm_node *o2nm_get_node_by_num(u8 node_num);
+struct o2nm_node *o2nm_get_node_by_ip(u32 addr);
+void o2nm_node_get(struct o2nm_node *node);
+void o2nm_node_put(struct o2nm_node *node);
#endif /* CLUSTER_NODEMANAGER_H */
Modified: trunk/fs/ocfs2/cluster/ocfs2_nodemanager.h
===================================================================
--- trunk/fs/ocfs2/cluster/ocfs2_nodemanager.h 2005-06-01 23:43:31 UTC (rev 2352)
+++ trunk/fs/ocfs2/cluster/ocfs2_nodemanager.h 2005-06-02 00:59:25 UTC (rev 2353)
@@ -28,12 +28,12 @@
#ifndef _OCFS2_NODEMANAGER_H
#define _OCFS2_NODEMANAGER_H
-#define NM_API_VERSION 2
+#define O2NM_API_VERSION 2
-#define NM_MAX_NODES 255
-#define NM_INVALID_NODE_NUM 255
+#define O2NM_MAX_NODES 255
+#define O2NM_INVALID_NODE_NUM 255
/* host name, group name, cluster name all 64 bytes */
-#define NM_MAX_NAME_LEN 64 // __NEW_UTS_LEN
+#define O2NM_MAX_NAME_LEN 64 // __NEW_UTS_LEN
#endif /* _OCFS2_NODEMANAGER_H */
Modified: trunk/fs/ocfs2/cluster/tcp.c
===================================================================
--- trunk/fs/ocfs2/cluster/tcp.c 2005-06-01 23:43:31 UTC (rev 2352)
+++ trunk/fs/ocfs2/cluster/tcp.c 2005-06-02 00:59:25 UTC (rev 2353)
@@ -44,7 +44,7 @@
* it is safe to tear down the socket.
*
* A caller who wants to communicate with a node gets a ref to the sock
- * container by finding it the nm_node that it wants to communicate with.
+ * container by finding it the o2nm_node that it wants to communicate with.
* The sock container will only be found on the node once there is a valid
* socket in the container. The socket is only finally torn down from
* the container when the container loses all of its references -- so as
@@ -152,22 +152,22 @@
static int o2net_receive_thread(void *data);
static int o2net_receive(void);
static void o2net_try_accept(struct socket *sock);
-static int o2net_process_message(struct nm_node *node, struct socket *sock,
+static int o2net_process_message(struct o2nm_node *node, struct socket *sock,
o2net_msg *hdr);
static void o2net_data_ready(struct sock *sk, int bytes);
-static int o2net_sc_from_node(struct nm_node *node,
+static int o2net_sc_from_node(struct o2nm_node *node,
struct o2net_sock_container **sc_ret);
static int o2net_attach_sc(struct o2net_sock_container *sc);
-static void o2net_finish_connect(struct nm_node *node);
-static struct o2net_sock_container *sc_alloc(struct nm_node *node,
+static void o2net_finish_connect(struct o2nm_node *node);
+static struct o2net_sock_container *sc_alloc(struct o2nm_node *node,
int from_conn);
static void o2net_state_change(struct sock *sk);
-static void o2net_complete_nodes_nsw(struct nm_node *node);
+static void o2net_complete_nodes_nsw(struct o2nm_node *node);
//////////////////////
-int o2net_start_rx_thread(struct nm_node *node)
+int o2net_start_rx_thread(struct o2nm_node *node)
{
struct socket *sock;
int ret = 0;
@@ -207,7 +207,7 @@
return 0;
}
-void o2net_stop_rx_thread(struct nm_node *node)
+void o2net_stop_rx_thread(struct o2nm_node *node)
{
if (node->nd_rx_thread) {
mlog(ML_KTHREAD, "waiting for net thread to exit....\n");
@@ -289,7 +289,7 @@
* specified instead of an sc such that any sc currently attached to the
* node will be detached.
*/
-void o2net_detach_sc(struct o2net_sock_container *sc, struct nm_node *node)
+void o2net_detach_sc(struct o2net_sock_container *sc, struct o2nm_node *node)
{
int nr_puts = 0;
struct sock *sk;
@@ -349,7 +349,7 @@
nr_puts++;
}
spin_unlock_bh(&node->nd_lock);
- nm_node_put(node);
+ o2nm_node_put(node);
node = NULL;
}
@@ -659,7 +659,7 @@
return ret;
}
-static int o2net_prep_nsw(struct nm_node *node, struct o2net_status_wait *nsw)
+static int o2net_prep_nsw(struct o2nm_node *node, struct o2net_status_wait *nsw)
{
int ret = 0;
@@ -694,7 +694,7 @@
return ret;
}
-static void o2net_complete_nsw_locked(struct nm_node *node,
+static void o2net_complete_nsw_locked(struct o2nm_node *node,
struct o2net_status_wait *nsw,
enum o2net_system_error sys_status,
s32 status)
@@ -710,7 +710,7 @@
}
}
-static void o2net_complete_nsw(struct nm_node *node,
+static void o2net_complete_nsw(struct o2nm_node *node,
struct o2net_status_wait *nsw,
u64 id, enum o2net_system_error sys_status,
s32 status)
@@ -732,7 +732,7 @@
return;
}
-static void o2net_complete_nodes_nsw(struct nm_node *node)
+static void o2net_complete_nodes_nsw(struct o2nm_node *node)
{
struct list_head *iter, *tmp;
unsigned int num_kills = 0;
@@ -750,7 +750,7 @@
node->nd_num, num_kills);
}
-static int o2net_nsw_completed(struct nm_node * node,
+static int o2net_nsw_completed(struct o2nm_node *node,
struct o2net_status_wait *nsw)
{
int completed;
@@ -768,7 +768,7 @@
size_t i, iovlen, caller_bytes = 0;
struct iovec *iov = NULL;
struct o2net_sock_container *sc = NULL;
- struct nm_node *node = NULL;
+ struct o2nm_node *node = NULL;
struct o2net_status_wait nsw = {
.ns_node_item = LIST_HEAD_INIT(nsw.ns_node_item),
};
@@ -802,12 +802,12 @@
goto out;
}
- if (target_node == nm_this_node()) {
+ if (target_node == o2nm_this_node()) {
ret = -ELOOP;
goto out;
}
- node = nm_get_node_by_num(target_node);
+ node = o2nm_get_node_by_num(target_node);
if (node == NULL) {
mlog(0, "node %u unknown\n", target_node);
ret = -EINVAL;
@@ -893,7 +893,7 @@
kfree(msg);
if (node) {
o2net_complete_nsw(node, &nsw, 0, 0, 0);
- nm_node_put(node);
+ o2nm_node_put(node);
}
return ret;
}
@@ -1008,7 +1008,7 @@
int err = 0, read_eagain, read_some;
void *data;
size_t datalen;
- struct nm_node *node = NULL;
+ struct o2nm_node *node = NULL;
spin_lock_bh(&o2net_active_lock);
while (!list_empty(&o2net_active_list)) {
@@ -1031,7 +1031,7 @@
if (sc->sc_node == NULL)
err = -ENOTCONN;
else {
- nm_node_get(sc->sc_node);
+ o2nm_node_get(sc->sc_node);
node = sc->sc_node;
}
spin_unlock_bh(&sc->sc_lock);
@@ -1101,7 +1101,7 @@
done:
if (node) {
- nm_node_put(node);
+ o2nm_node_put(node);
node = NULL;
}
@@ -1136,7 +1136,7 @@
/* if we get a message that the node has gone down then we detach its
* active socket. This will send error codes to any transmitters that
* were waiting for status replies on that node. */
-static void o2net_hb_node_down_cb(struct nm_node *node, int node_num,
+static void o2net_hb_node_down_cb(struct o2nm_node *node, int node_num,
void *data)
{
o2net_detach_sc(NULL, node);
@@ -1174,7 +1174,7 @@
/* this returns -errno if the header was unknown or too large, etc.
* after this is called the buffer us reused for the next message */
-static int o2net_process_message(struct nm_node *node, struct socket *sock,
+static int o2net_process_message(struct o2nm_node *node, struct socket *sock,
o2net_msg *hdr)
{
int ret, handler_status;
@@ -1238,9 +1238,9 @@
static int o2net_attach_sc(struct o2net_sock_container *sc)
{
int ret = 0, opt = 1;
- u8 this_node = nm_this_node(); /* :( */
+ u8 this_node = o2nm_this_node(); /* :( */
struct o2net_sock_container *detach = NULL;
- struct nm_node *node = sc->sc_node;
+ struct o2nm_node *node = sc->sc_node;
mm_segment_t oldfs;
sclog(sc, "attaching with node %p\n", node);
@@ -1358,7 +1358,7 @@
state_change(sk);
}
-static struct o2net_sock_container *sc_alloc(struct nm_node *node,
+static struct o2net_sock_container *sc_alloc(struct o2nm_node *node,
int from_conn)
{
struct o2net_sock_container *sc, *ret = NULL;
@@ -1371,7 +1371,7 @@
kref_init(&sc->sc_kref, sc_kref_release);
spin_lock_init(&sc->sc_lock);
- nm_node_get(node);
+ o2nm_node_get(node);
sc->sc_node = node;
sc->sc_from_connect = from_conn;
sc->sc_pending_connect = from_conn;
@@ -1395,7 +1395,7 @@
return ret;
}
-static void o2net_finish_connect(struct nm_node *node)
+static void o2net_finish_connect(struct o2nm_node *node)
{
atomic_dec(&node->nd_pending_connects);
atomic_inc(&node->nd_sc_generation);
@@ -1403,7 +1403,7 @@
wake_up_all(&node->nd_sc_wq);
}
-static int o2net_start_connect(struct nm_node *node, u32 addr, u16 port)
+static int o2net_start_connect(struct o2nm_node *node, u32 addr, u16 port)
{
struct socket *sock = NULL;
struct sockaddr_in myaddr, remoteaddr;
@@ -1496,13 +1496,13 @@
/*
* some tx code path would like to send a message. For them to do so
- * we need to follow an nm_node struct to find an active sock container.
+ * we need to follow an o2nm_node struct to find an active sock container.
* If we find a node without a sock container then we try and issue a
* connect and wait for its outcome. We only really block waiting for
* the node to have an active socket. The rx thread might race with
* the connect() to accept() a socket that we'll happily use.
*/
-static int o2net_sc_from_node(struct nm_node *node,
+static int o2net_sc_from_node(struct o2nm_node *node,
struct o2net_sock_container **sc_ret)
{
struct o2net_sock_container *sc = NULL;
@@ -1586,7 +1586,7 @@
int ret, slen;
struct sockaddr_in sin;
struct socket *new_sock = NULL;
- struct nm_node *node = NULL;
+ struct o2nm_node *node = NULL;
struct o2net_sock_container *sc = NULL;
BUG_ON(sock == NULL);
@@ -1607,7 +1607,7 @@
if (ret < 0)
goto out;
- node = nm_get_node_by_ip(sin.sin_addr.s_addr);
+ node = o2nm_get_node_by_ip(sin.sin_addr.s_addr);
if (node == NULL) {
mlog(ML_NOTICE, "attempt to connect from unknown node at "
"%u.%u.%u.%u:%d\n", NIPQUAD(sin.sin_addr.s_addr),
@@ -1634,7 +1634,7 @@
}
}
if (node)
- nm_node_put(node);
+ o2nm_node_put(node);
if (sc)
sc_put(sc);
return;
Modified: trunk/fs/ocfs2/cluster/tcp.h
===================================================================
--- trunk/fs/ocfs2/cluster/tcp.h 2005-06-01 23:43:31 UTC (rev 2352)
+++ trunk/fs/ocfs2/cluster/tcp.h 2005-06-02 00:59:25 UTC (rev 2353)
@@ -134,10 +134,10 @@
int o2net_register_hb_callbacks(void);
void o2net_unregister_hb_callbacks(void);
-int o2net_start_rx_thread(struct nm_node *node);
-void o2net_stop_rx_thread(struct nm_node *node);
+int o2net_start_rx_thread(struct o2nm_node *node);
+void o2net_stop_rx_thread(struct o2nm_node *node);
struct o2net_sock_container;
-void o2net_detach_sc(struct o2net_sock_container *sc, struct nm_node *node);
+void o2net_detach_sc(struct o2net_sock_container *sc, struct o2nm_node *node);
int o2net_proc_init(struct proc_dir_entry *parent);
void o2net_proc_exit(struct proc_dir_entry *parent);
Modified: trunk/fs/ocfs2/cluster/tcp_internal.h
===================================================================
--- trunk/fs/ocfs2/cluster/tcp_internal.h 2005-06-01 23:43:31 UTC (rev 2352)
+++ trunk/fs/ocfs2/cluster/tcp_internal.h 2005-06-02 00:59:25 UTC (rev 2353)
@@ -31,7 +31,7 @@
struct kref sc_kref;
spinlock_t sc_lock;
struct socket *sc_sock;
- struct nm_node *sc_node;
+ struct o2nm_node *sc_node;
unsigned sc_from_connect:1,
sc_pending_connect:1;
@@ -79,7 +79,7 @@
struct o2net_send_tracking {
struct list_head st_net_proc_item;
struct task_struct *st_task;
- struct nm_node *st_node;
+ struct o2nm_node *st_node;
struct o2net_sock_container *st_sc;
u32 st_msg_type;
u32 st_msg_key;
Modified: trunk/fs/ocfs2/dlm/dlmcommon.h
===================================================================
--- trunk/fs/ocfs2/dlm/dlmcommon.h 2005-06-01 23:43:31 UTC (rev 2352)
+++ trunk/fs/ocfs2/dlm/dlmcommon.h 2005-06-02 00:59:25 UTC (rev 2353)
@@ -36,7 +36,7 @@
#define DLM_LOCKID_NAME_MAX 32
#define DLM_DOMAIN_NAME_MAX_LEN 255
-#define DLM_LOCK_RES_OWNER_UNKNOWN NM_MAX_NODES
+#define DLM_LOCK_RES_OWNER_UNKNOWN O2NM_MAX_NODES
#define DLM_THREAD_SHUFFLE_INTERVAL 5 // flush everything every 5 passes
#define DLM_THREAD_MS 200 // flush at least every 200 ms
@@ -74,7 +74,7 @@
struct list_head node_data;
u8 new_master;
u8 dead_node;
- unsigned long node_map[BITS_TO_LONGS(NM_MAX_NODES)];
+ unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
} dlm_recovery_ctxt;
typedef enum _dlm_ctxt_state {
@@ -101,9 +101,9 @@
u32 key;
u8 joining_node;
wait_queue_head_t dlm_join_events;
- unsigned long live_nodes_map[BITS_TO_LONGS(NM_MAX_NODES)];
- unsigned long domain_map[BITS_TO_LONGS(NM_MAX_NODES)];
- unsigned long recovery_map[BITS_TO_LONGS(NM_MAX_NODES)];
+ unsigned long live_nodes_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ unsigned long domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ unsigned long recovery_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
dlm_recovery_ctxt reco;
spinlock_t master_lock;
struct list_head master_list;
@@ -308,10 +308,10 @@
wait_queue_head_t wq;
atomic_t woken;
struct kref mle_refs;
- unsigned long maybe_map[BITS_TO_LONGS(NM_MAX_NODES)];
- unsigned long vote_map[BITS_TO_LONGS(NM_MAX_NODES)];
- unsigned long response_map[BITS_TO_LONGS(NM_MAX_NODES)];
- unsigned long node_map[BITS_TO_LONGS(NM_MAX_NODES)];
+ unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
u8 master;
u8 new_master;
u8 error;
@@ -326,7 +326,7 @@
typedef struct _dlm_node_iter
{
- unsigned long node_map[BITS_TO_LONGS(NM_MAX_NODES)];
+ unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
int curnode;
} dlm_node_iter;
@@ -385,7 +385,7 @@
u16 pad1;
u32 flags;
- u8 name[NM_MAX_NAME_LEN];
+ u8 name[O2NM_MAX_NAME_LEN];
} dlm_master_request;
#define DLM_ASSERT_MASTER_MLE_CLEANUP 0x00000001
@@ -398,7 +398,7 @@
u16 pad1;
u32 flags;
- u8 name[NM_MAX_NAME_LEN];
+ u8 name[O2NM_MAX_NAME_LEN];
} dlm_assert_master;
typedef struct _dlm_migrate_request
@@ -408,7 +408,7 @@
u8 namelen;
u8 pad1;
u32 pad2;
- u8 name[NM_MAX_NAME_LEN];
+ u8 name[O2NM_MAX_NAME_LEN];
} dlm_migrate_request;
typedef struct _dlm_master_requery
@@ -418,7 +418,7 @@
u8 node_idx;
u8 namelen;
u32 pad3;
- u8 name[NM_MAX_NAME_LEN];
+ u8 name[O2NM_MAX_NAME_LEN];
} dlm_master_requery;
#define DLM_MRES_RECOVERY 0x01
@@ -461,7 +461,7 @@
s8 requested_type;
u8 namelen;
- u8 name[NM_MAX_NAME_LEN];
+ u8 name[O2NM_MAX_NAME_LEN];
} dlm_create_lock;
typedef struct _dlm_convert_lock
@@ -474,7 +474,7 @@
s8 requested_type;
u8 namelen;
- u8 name[NM_MAX_NAME_LEN];
+ u8 name[O2NM_MAX_NAME_LEN];
s8 lvb[0];
} dlm_convert_lock;
@@ -489,7 +489,7 @@
u8 node_idx;
u8 namelen;
- u8 name[NM_MAX_NAME_LEN];
+ u8 name[O2NM_MAX_NAME_LEN];
s8 lvb[0];
} dlm_unlock_lock;
@@ -505,7 +505,7 @@
u8 blocked_type;
u8 namelen;
- u8 name[NM_MAX_NAME_LEN];
+ u8 name[O2NM_MAX_NAME_LEN];
s8 lvb[0];
} dlm_proxy_ast;
@@ -553,7 +553,7 @@
u8 node_idx;
u8 pad1[2];
u8 name_len;
- u8 domain[NM_MAX_NAME_LEN];
+ u8 domain[O2NM_MAX_NAME_LEN];
} dlm_query_join_request;
typedef struct _dlm_assert_joined
@@ -561,7 +561,7 @@
u8 node_idx;
u8 pad1[2];
u8 name_len;
- u8 domain[NM_MAX_NAME_LEN];
+ u8 domain[O2NM_MAX_NAME_LEN];
} dlm_assert_joined;
typedef struct _dlm_cancel_join
@@ -569,7 +569,7 @@
u8 node_idx;
u8 pad1[2];
u8 name_len;
- u8 domain[NM_MAX_NAME_LEN];
+ u8 domain[O2NM_MAX_NAME_LEN];
} dlm_cancel_join;
typedef struct _dlm_exit_domain
@@ -864,8 +864,8 @@
int dlm_nm_init(dlm_ctxt *dlm);
int dlm_heartbeat_init(dlm_ctxt *dlm);
-void dlm_hb_node_down_cb(struct nm_node *node, int idx, void *data);
-void dlm_hb_node_up_cb(struct nm_node *node, int idx, void *data);
+void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data);
+void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data);
int dlm_hb_node_dead(dlm_ctxt *dlm, int node);
int __dlm_hb_node_dead(dlm_ctxt *dlm, int node);
@@ -914,9 +914,9 @@
int dlm_init_mle_cache(void);
void dlm_destroy_mle_cache(void);
void dlm_mle_node_down(dlm_ctxt *dlm, dlm_master_list_entry *mle,
- struct nm_node *node, int idx);
+ struct o2nm_node *node, int idx);
void dlm_mle_node_up(dlm_ctxt *dlm, dlm_master_list_entry *mle,
- struct nm_node *node, int idx);
+ struct o2nm_node *node, int idx);
int dlm_do_assert_master(dlm_ctxt *dlm, const char *lockname,
unsigned int namelen, void *nodemap,
u32 flags);
@@ -1029,9 +1029,9 @@
{
int bit;
DLM_ASSERT(iter);
- bit = find_next_bit(iter->node_map, NM_MAX_NODES, iter->curnode+1);
- if (bit >= NM_MAX_NODES) {
- iter->curnode = NM_MAX_NODES;
+ bit = find_next_bit(iter->node_map, O2NM_MAX_NODES, iter->curnode+1);
+ if (bit >= O2NM_MAX_NODES) {
+ iter->curnode = O2NM_MAX_NODES;
return -ENOENT;
}
iter->curnode = bit;
Modified: trunk/fs/ocfs2/dlm/dlmdebug.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmdebug.c 2005-06-01 23:43:31 UTC (rev 2352)
+++ trunk/fs/ocfs2/dlm/dlmdebug.c 2005-06-02 00:59:25 UTC (rev 2353)
@@ -71,7 +71,7 @@
sizeof(dlm_debug_funcs));
static ssize_t write_dlm_debug(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
int i;
char c;
@@ -316,7 +316,7 @@
mlog(0, "found dlm=%p, lockres=%p\n", dlm, res);
{
int ret;
- ret = dlm_migrate_lockres(dlm, res, NM_MAX_NODES);
+ ret = dlm_migrate_lockres(dlm, res, O2NM_MAX_NODES);
printk("dlm_migrate_lockres returned %d\n", ret);
}
dlm_lockres_put(res);
Modified: trunk/fs/ocfs2/dlm/dlmdomain.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmdomain.c 2005-06-01 23:43:31 UTC (rev 2352)
+++ trunk/fs/ocfs2/dlm/dlmdomain.c 2005-06-02 00:59:25 UTC (rev 2353)
@@ -389,9 +389,9 @@
mlog(ML_NOTICE, "Nodes in my domain (\"%s\"):\n", dlm->name);
- while ((node = find_next_bit(dlm->domain_map, NM_MAX_NODES, node + 1))
+ while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, node + 1))
!= -1) {
- if (node >= NM_MAX_NODES)
+ if (node >= O2NM_MAX_NODES)
break;
mlog(ML_NOTICE, " node %d\n", node);
}
@@ -460,9 +460,9 @@
spin_lock(&dlm->spinlock);
/* Clear ourselves from the domain map */
clear_bit(dlm->node_num, dlm->domain_map);
- while ((node = find_next_bit(dlm->domain_map, NM_MAX_NODES, 0))
+ while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0))
!= -1) {
- if (node >= NM_MAX_NODES)
+ if (node >= O2NM_MAX_NODES)
break;
/* Drop the dlm spinlock. This is safe wrt the domain_map.
@@ -693,14 +693,14 @@
int status, tmpstat;
unsigned int node;
- if (map_size != BITS_TO_LONGS(NM_MAX_NODES))
+ if (map_size != BITS_TO_LONGS(O2NM_MAX_NODES))
return -EINVAL;
status = 0;
node = -1;
- while ((node = find_next_bit(node_map, NM_MAX_NODES, node + 1))
+ while ((node = find_next_bit(node_map, O2NM_MAX_NODES, node + 1))
!= -1) {
- if (node >= NM_MAX_NODES)
+ if (node >= O2NM_MAX_NODES)
break;
if (node == dlm->node_num)
@@ -800,9 +800,9 @@
status = 0;
node = -1;
- while ((node = find_next_bit(node_map, NM_MAX_NODES, node + 1))
+ while ((node = find_next_bit(node_map, O2NM_MAX_NODES, node + 1))
!= -1) {
- if (node >= NM_MAX_NODES)
+ if (node >= O2NM_MAX_NODES)
break;
if (node == dlm->node_num)
@@ -831,8 +831,8 @@
}
struct domain_join_ctxt {
- unsigned long live_map[BITS_TO_LONGS(NM_MAX_NODES)];
- unsigned long yes_resp_map[BITS_TO_LONGS(NM_MAX_NODES)];
+ unsigned long live_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ unsigned long yes_resp_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
};
static int dlm_should_restart_join(dlm_ctxt *dlm,
@@ -888,9 +888,9 @@
spin_unlock(&dlm->spinlock);
node = -1;
- while ((node = find_next_bit(ctxt->live_map, NM_MAX_NODES, node + 1))
+ while ((node = find_next_bit(ctxt->live_map, O2NM_MAX_NODES, node + 1))
!= -1) {
- if (node >= NM_MAX_NODES)
+ if (node >= O2NM_MAX_NODES)
break;
if (node == dlm->node_num)
@@ -1178,7 +1178,7 @@
strcpy(dlm->name, domain);
dlm->key = key;
- dlm->node_num = nm_this_node();
+ dlm->node_num = o2nm_this_node();
spin_lock_init(&dlm->spinlock);
spin_lock_init(&dlm->master_lock);
@@ -1212,8 +1212,8 @@
dlm->joining_node = DLM_LOCK_RES_OWNER_UNKNOWN;
init_waitqueue_head(&dlm->dlm_join_events);
- dlm->reco.new_master = NM_INVALID_NODE_NUM;
- dlm->reco.dead_node = NM_INVALID_NODE_NUM;
+ dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
+ dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
atomic_set(&dlm->local_resources, 0);
atomic_set(&dlm->remote_resources, 0);
atomic_set(&dlm->unknown_resources, 0);
@@ -1242,7 +1242,7 @@
dlm_ctxt *dlm = NULL;
dlm_ctxt *new_ctxt = NULL;
- if (strlen(domain) > NM_MAX_NAME_LEN) {
+ if (strlen(domain) > O2NM_MAX_NAME_LEN) {
mlog(ML_ERROR, "domain name length too long\n");
goto leave;
}
Modified: trunk/fs/ocfs2/dlm/dlmfs.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmfs.c 2005-06-01 23:43:31 UTC (rev 2352)
+++ trunk/fs/ocfs2/dlm/dlmfs.c 2005-06-02 00:59:25 UTC (rev 2353)
@@ -422,7 +422,7 @@
mlog(0, "mkdir %.*s\n", domain->len, domain->name);
/* verify that we have a proper domain */
- if (domain->len >= NM_MAX_NAME_LEN) {
+ if (domain->len >= O2NM_MAX_NAME_LEN) {
status = -EINVAL;
mlog(ML_ERROR, "invalid domain name for directory.\n");
goto bail;
Modified: trunk/fs/ocfs2/dlm/dlmmaster.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmmaster.c 2005-06-01 23:43:31 UTC (rev 2352)
+++ trunk/fs/ocfs2/dlm/dlmmaster.c 2005-06-02 00:59:25 UTC (rev 2353)
@@ -252,8 +252,8 @@
atomic_set(&mle->woken, 0);
kref_init(&mle->mle_refs, dlm_mle_release);
memset(mle->response_map, 0, sizeof(mle->response_map));
- mle->master = NM_MAX_NODES;
- mle->new_master = NM_MAX_NODES;
+ mle->master = O2NM_MAX_NODES;
+ mle->new_master = O2NM_MAX_NODES;
mle->error = 0;
if (mle->type == DLM_MLE_MASTER) {
@@ -302,7 +302,7 @@
void dlm_mle_node_down(dlm_ctxt *dlm, dlm_master_list_entry *mle,
- struct nm_node *node, int idx)
+ struct o2nm_node *node, int idx)
{
DLM_ASSERT(mle);
DLM_ASSERT(dlm);
@@ -324,7 +324,7 @@
}
void dlm_mle_node_up(dlm_ctxt *dlm, dlm_master_list_entry *mle,
- struct nm_node *node, int idx)
+ struct o2nm_node *node, int idx)
{
DLM_ASSERT(mle);
DLM_ASSERT(dlm);
@@ -685,7 +685,7 @@
ret = dlm_do_master_request(mle, nodenum);
if (ret < 0)
mlog_errno(ret);
- if (mle->master != NM_MAX_NODES) {
+ if (mle->master != O2NM_MAX_NODES) {
/* found a master ! */
break;
}
@@ -761,7 +761,7 @@
goto leave;
}
- if (m != NM_MAX_NODES) {
+ if (m != O2NM_MAX_NODES) {
/* another node has done an assert!
* all done! */
sleep = 0;
@@ -769,7 +769,7 @@
sleep = 1;
/* have all nodes responded? */
if (voting_done && !blocked) {
- bit = find_next_bit(mle->maybe_map, NM_MAX_NODES, 0);
+ bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
if (dlm->node_num <= bit) {
/* my node number is lowest.
* now tell other nodes that I am
@@ -1129,7 +1129,7 @@
dlm_node_iter iter;
int ret = 0;
- DLM_ASSERT(namelen <= NM_MAX_NAME_LEN);
+ DLM_ASSERT(namelen <= O2NM_MAX_NAME_LEN);
DLM_ASSERT(dlm);
DLM_ASSERT(lockname);
DLM_ASSERT(nodemap);
@@ -1210,8 +1210,8 @@
"MLE for it! (%.*s)\n", assert->node_idx,
namelen, name);
} else {
- int bit = find_next_bit (mle->maybe_map, NM_MAX_NODES, 0);
- if (bit >= NM_MAX_NODES) {
+ int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0);
+ if (bit >= O2NM_MAX_NODES) {
mlog(ML_ERROR, "no bits set in the maybe_map, but %u "
"is asserting! (%.*s)\n", assert->node_idx,
namelen, name);
@@ -1358,7 +1358,7 @@
dlm_ctxt *dlm = data;
int ret = 0;
dlm_lock_resource *res;
- unsigned long nodemap[BITS_TO_LONGS(NM_MAX_NODES)];
+ unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
int ignore_higher;
int bit;
u8 request_from;
@@ -1388,9 +1388,9 @@
clear_bit(request_from, nodemap);
bit = dlm->node_num;
while (1) {
- bit = find_next_bit(nodemap, NM_MAX_NODES,
+ bit = find_next_bit(nodemap, O2NM_MAX_NODES,
bit+1);
- if (bit >= NM_MAX_NODES)
+ if (bit >= O2NM_MAX_NODES)
break;
clear_bit(bit, nodemap);
}
@@ -1511,12 +1511,12 @@
spin_lock(&dlm->spinlock);
/* pick a new node */
if (!test_bit(target, dlm->domain_map) ||
- target >= NM_MAX_NODES) {
+ target >= O2NM_MAX_NODES) {
target = dlm_pick_migration_target(dlm, res);
}
mlog(0, "node %u chosen for migration\n", target);
- if (target >= NM_MAX_NODES ||
+ if (target >= O2NM_MAX_NODES ||
!test_bit(target, dlm->domain_map)) {
/* target chosen is not alive */
ret = -EINVAL;
@@ -1757,9 +1757,9 @@
nodenum = -1;
while (1) {
nodenum = find_next_bit(dlm->domain_map,
- NM_MAX_NODES, nodenum+1);
+ O2NM_MAX_NODES, nodenum+1);
mlog(0, "found %d in domain map\n", nodenum);
- if (nodenum >= NM_MAX_NODES)
+ if (nodenum >= O2NM_MAX_NODES)
break;
if (nodenum != dlm->node_num) {
mlog(0, "picking %d\n", nodenum);
@@ -1992,7 +1992,7 @@
if (mle->type == DLM_MLE_MASTER)
continue;
- bit = find_next_bit(mle->maybe_map, NM_MAX_NODES, 0);
+ bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
/* BLOCK mles are initiated by other nodes.
* need to clean up if the dead node would have
Modified: trunk/fs/ocfs2/dlm/dlmrecovery.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmrecovery.c 2005-06-01 23:43:31 UTC (rev 2352)
+++ trunk/fs/ocfs2/dlm/dlmrecovery.c 2005-06-02 00:59:25 UTC (rev 2353)
@@ -117,8 +117,8 @@
{
spin_lock(&dlm->spinlock);
clear_bit(dlm->reco.dead_node, dlm->recovery_map);
- dlm->reco.dead_node = NM_INVALID_NODE_NUM;
- dlm->reco.new_master = NM_INVALID_NODE_NUM;
+ dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
+ dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
spin_unlock(&dlm->spinlock);
}
@@ -259,31 +259,31 @@
spin_lock(&dlm->spinlock);
/* check to see if the new master has died */
- if (dlm->reco.new_master != NM_INVALID_NODE_NUM &&
+ if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
test_bit(dlm->reco.new_master, dlm->recovery_map)) {
mlog(0, "new master %u died while recovering %u!\n",
dlm->reco.new_master, dlm->reco.dead_node);
/* unset the new_master, leave dead_node */
- dlm->reco.new_master = NM_INVALID_NODE_NUM;
+ dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
}
/* select a target to recover */
- if (dlm->reco.dead_node == NM_INVALID_NODE_NUM) {
+ if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
int bit;
- bit = find_next_bit (dlm->recovery_map, NM_MAX_NODES+1, 0);
- if (bit >= NM_MAX_NODES || bit < 0)
- dlm->reco.dead_node = NM_INVALID_NODE_NUM;
+ bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES+1, 0);
+ if (bit >= O2NM_MAX_NODES || bit < 0)
+ dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
else
dlm->reco.dead_node = bit;
} else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
/* BUG? */
mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
dlm->reco.dead_node);
- dlm->reco.dead_node = NM_INVALID_NODE_NUM;
+ dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
}
- if (dlm->reco.dead_node == NM_INVALID_NODE_NUM) {
+ if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
// mlog(0, "nothing to recover! sleeping now!\n");
spin_unlock(&dlm->spinlock);
/* return to main thread loop and sleep. */
@@ -300,7 +300,7 @@
if (dlm->reco.new_master == dlm->node_num)
goto master_here;
- if (dlm->reco.new_master == NM_INVALID_NODE_NUM) {
+ if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
/* choose a new master */
if (!dlm_pick_recovery_master(dlm)) {
/* already notified everyone. go. */
@@ -507,8 +507,8 @@
spin_unlock(&dlm->spinlock);
while (1) {
- num = find_next_bit (dlm->reco.node_map, NM_MAX_NODES, num);
- if (num >= NM_MAX_NODES) {
+ num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num);
+ if (num >= O2NM_MAX_NODES) {
break;
}
DLM_ASSERT(num != dead_node);
@@ -1549,7 +1549,7 @@
}
-void dlm_hb_node_down_cb(struct nm_node *node, int idx, void *data)
+void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
{
dlm_ctxt *dlm = data;
dlm_master_list_entry *mle;
@@ -1597,7 +1597,7 @@
dlm_put(dlm);
}
-void dlm_hb_node_up_cb(struct nm_node *node, int idx, void *data)
+void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
{
dlm_ctxt *dlm = data;
dlm_master_list_entry *mle;
@@ -1689,7 +1689,7 @@
}
} else if (ret == DLM_NOTQUEUED) {
/* another node is master. wait on
- * reco.new_master != NM_INVALID_NODE_NUM */
+ * reco.new_master != O2NM_INVALID_NODE_NUM */
status = -EEXIST;
}
@@ -1761,11 +1761,11 @@
mlog(0, "node %u wants to recover node %u\n",
br->node_idx, br->dead_node);
spin_lock(&dlm->spinlock);
- if (dlm->reco.new_master != NM_INVALID_NODE_NUM) {
+ if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
mlog(0, "new_master already set to %u!\n",
dlm->reco.new_master);
}
- if (dlm->reco.dead_node != NM_INVALID_NODE_NUM) {
+ if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
mlog(0, "dead_node already set to %u!\n",
dlm->reco.dead_node);
}
Modified: trunk/fs/ocfs2/dlm/dlmthread.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmthread.c 2005-06-01 23:43:31 UTC (rev 2352)
+++ trunk/fs/ocfs2/dlm/dlmthread.c 2005-06-02 00:59:25 UTC (rev 2353)
@@ -173,7 +173,7 @@
spin_unlock(&dlm->spinlock);
again:
- ret = dlm_migrate_lockres(dlm, lockres, NM_MAX_NODES);
+ ret = dlm_migrate_lockres(dlm, lockres, O2NM_MAX_NODES);
if (ret == -ENOTEMPTY) {
mlog(ML_ERROR, "lockres %.*s still has local locks!\n",
lockres->lockname.len, lockres->lockname.name);
Modified: trunk/fs/ocfs2/heartbeat.c
===================================================================
--- trunk/fs/ocfs2/heartbeat.c 2005-06-01 23:43:31 UTC (rev 2352)
+++ trunk/fs/ocfs2/heartbeat.c 2005-06-02 00:59:25 UTC (rev 2353)
@@ -67,7 +67,7 @@
ocfs_node_map_init(&osb->umount_map);
}
-static void ocfs2_hb_node_down_cb(struct nm_node *node,
+static void ocfs2_hb_node_down_cb(struct o2nm_node *node,
int node_num,
void *data)
{
@@ -90,7 +90,7 @@
ocfs2_remove_node_from_vote_queues(osb, node_num);
}
-static void ocfs2_hb_node_up_cb(struct nm_node *node,
+static void ocfs2_hb_node_up_cb(struct o2nm_node *node,
int node_num,
void *data)
{
@@ -155,7 +155,7 @@
return;
}
- argv[0] = (char *)nm_get_hb_ctl_path();
+ argv[0] = (char *)o2nm_get_hb_ctl_path();
argv[1] = "-K";
argv[2] = "-u";
argv[3] = osb->uuid_str;
@@ -312,9 +312,9 @@
{
int i = idx;
- idx = NM_INVALID_NODE_NUM;
+ idx = O2NM_INVALID_NODE_NUM;
spin_lock(&osb->node_map_lock);
- if ((i != NM_INVALID_NODE_NUM) &&
+ if ((i != O2NM_INVALID_NODE_NUM) &&
(i >= 0) &&
(i < map->num_nodes)) {
while(i < map->num_nodes) {
Modified: trunk/fs/ocfs2/journal.c
===================================================================
--- trunk/fs/ocfs2/journal.c 2005-06-01 23:43:31 UTC (rev 2352)
+++ trunk/fs/ocfs2/journal.c 2005-06-02 00:59:25 UTC (rev 2353)
@@ -1009,7 +1009,7 @@
while(!ocfs_node_map_is_empty(osb, &osb->recovery_map)) {
node_num = ocfs_node_map_first_set_bit(osb,
&osb->recovery_map);
- if (node_num == NM_INVALID_NODE_NUM) {
+ if (node_num == O2NM_INVALID_NODE_NUM) {
mlog(0, "Out of nodes to recover.\n");
break;
}
Modified: trunk/fs/ocfs2/proc.c
===================================================================
--- trunk/fs/ocfs2/proc.c 2005-06-01 23:43:31 UTC (rev 2352)
+++ trunk/fs/ocfs2/proc.c 2005-06-02 00:59:25 UTC (rev 2353)
@@ -273,15 +273,15 @@
{
int len;
int ret;
- struct nm_node *node;
+ struct o2nm_node *node;
mlog_entry_void();
- node = nm_get_node_by_num(nm_this_node());
+ node = o2nm_get_node_by_num(o2nm_this_node());
if (node) {
len = sprintf(page, "%s\n", node->nd_name);
- nm_node_put(node);
+ o2nm_node_put(node);
} else
len = sprintf(page, "(unknown)\n");
Modified: trunk/fs/ocfs2/slot_map.c
===================================================================
--- trunk/fs/ocfs2/slot_map.c 2005-06-01 23:43:31 UTC (rev 2352)
+++ trunk/fs/ocfs2/slot_map.c 2005-06-02 00:59:25 UTC (rev 2353)
@@ -153,8 +153,8 @@
{
OCFS_ASSERT(slot_num != OCFS2_INVALID_SLOT);
OCFS_ASSERT(slot_num < si->si_num_slots);
- OCFS_ASSERT((node_num == NM_INVALID_NODE_NUM) ||
- (node_num < NM_MAX_NODES));
+ OCFS_ASSERT((node_num == O2NM_INVALID_NODE_NUM) ||
+ (node_num < O2NM_MAX_NODES));
si->si_global_node_nums[slot_num] = node_num;
}
Modified: trunk/fs/ocfs2/super.c
===================================================================
--- trunk/fs/ocfs2/super.c 2005-06-01 23:43:31 UTC (rev 2352)
+++ trunk/fs/ocfs2/super.c 2005-06-02 00:59:25 UTC (rev 2353)
@@ -802,8 +802,8 @@
/* XXX hold a ref on the node while mounted? easy enough, if
* desirable. */
- osb->node_num = nm_this_node();
- if (osb->node_num == NM_MAX_NODES) {
+ osb->node_num = o2nm_this_node();
+ if (osb->node_num == O2NM_MAX_NODES) {
mlog(ML_ERROR, "could not find this host's node number\n");
status = -ENOENT;
goto bail;
@@ -1117,7 +1117,7 @@
atomic_set(&osb->needs_checkpoint, 0);
atomic_set(&osb->clean_buffer_seq, 1);
- osb->node_num = NM_INVALID_NODE_NUM;
+ osb->node_num = O2NM_INVALID_NODE_NUM;
osb->slot_num = OCFS2_INVALID_SLOT;
osb->local_alloc_state = OCFS2_LA_UNUSED;
Modified: trunk/fs/ocfs2/vote.c
===================================================================
--- trunk/fs/ocfs2/vote.c 2005-06-01 23:43:31 UTC (rev 2352)
+++ trunk/fs/ocfs2/vote.c 2005-06-02 00:59:25 UTC (rev 2353)
@@ -571,7 +571,7 @@
i = ocfs_node_map_iterate(osb, &osb->mounted_map, 0);
- while (i != NM_INVALID_NODE_NUM) {
+ while (i != O2NM_INVALID_NODE_NUM) {
if (i != osb->node_num) {
mlog(0, "trying to send request to node %i\n", i);
ocfs_node_map_set_bit(osb, &w->n_node_map, i);
More information about the Ocfs2-commits
mailing list