[Ocfs2-commits] khackel commits r2372 - trunk/fs/ocfs2/dlm
svn-commits at oss.oracle.com
svn-commits at oss.oracle.com
Mon Jun 6 19:47:01 CDT 2005
Author: khackel
Signed-off-by: mfasheh
Date: 2005-06-06 19:46:58 -0500 (Mon, 06 Jun 2005)
New Revision: 2372
Modified:
trunk/fs/ocfs2/dlm/dlmast.c
trunk/fs/ocfs2/dlm/dlmcommon.h
trunk/fs/ocfs2/dlm/dlmconvert.c
trunk/fs/ocfs2/dlm/dlmdebug.c
trunk/fs/ocfs2/dlm/dlmdomain.c
trunk/fs/ocfs2/dlm/dlmlock.c
trunk/fs/ocfs2/dlm/dlmmaster.c
trunk/fs/ocfs2/dlm/dlmrecovery.c
trunk/fs/ocfs2/dlm/dlmthread.c
trunk/fs/ocfs2/dlm/dlmunlock.c
Log:
* removes all occurrences of strncmp and strncpy, replacing them with
explicit memcmp and memcpy with safe lengths
* removes a lot of useless assertions
* fixes incorrect use of find_next_bit
* replaces kmalloc/memset with kcalloc in many places
* yields in dlm_thread when necessary
* handles bad user input to /proc/dlm-debug a little better
* changes DLM_ASSERT to use BUG_ON (may just switch all asserts to
standard kernel ones eventually)
* changes TODO/printk into BUG() in a couple of places
* adds better comments in several places
Signed-off-by: mfasheh
Modified: trunk/fs/ocfs2/dlm/dlmast.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmast.c 2005-06-07 00:22:16 UTC (rev 2371)
+++ trunk/fs/ocfs2/dlm/dlmast.c 2005-06-07 00:46:58 UTC (rev 2372)
@@ -198,12 +198,8 @@
mlog_entry_void();
- DLM_ASSERT(lock);
- DLM_ASSERT(res);
lksb = lock->lksb;
- DLM_ASSERT(lksb);
fn = lock->ast;
- DLM_ASSERT(fn);
DLM_ASSERT(lock->ml.node == dlm->node_num);
dlm_update_lvb(dlm, res, lock);
@@ -219,10 +215,7 @@
mlog_entry_void();
- DLM_ASSERT(lock);
- DLM_ASSERT(res);
lksb = lock->lksb;
- DLM_ASSERT(lksb);
DLM_ASSERT(lock->ml.node != dlm->node_num);
lksbflags = lksb->flags;
@@ -242,7 +235,6 @@
mlog_entry_void();
DLM_ASSERT(lock->ml.node == dlm->node_num);
- DLM_ASSERT(fn);
(*fn)(lock->astdata, blocked_type);
}
@@ -265,7 +257,8 @@
if (!dlm_grab(dlm))
return DLM_REJECTED;
- DLM_ASSERT(dlm_domain_fully_joined(dlm));
+ mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
+ "Domain %s not fully joined!\n", dlm->name);
dlm_proxy_ast_to_host(past);
name = past->name;
@@ -275,7 +268,7 @@
if (locklen > DLM_LOCKID_NAME_MAX) {
ret = DLM_IVBUFLEN;
- printk("Invalid name length in proxy ast handler!\n");
+ mlog(ML_ERROR, "Invalid name length in proxy ast handler!\n");
goto leave;
}
@@ -422,7 +415,7 @@
past.type = msg_type;
past.blocked_type = blocked_type;
past.namelen = res->lockname.len;
- strncpy(past.name, res->lockname.name, past.namelen);
+ memcpy(past.name, res->lockname.name, past.namelen);
past.cookie = lock->ml.cookie;
iov[0].iov_len = sizeof(dlm_proxy_ast);
Modified: trunk/fs/ocfs2/dlm/dlmcommon.h
===================================================================
--- trunk/fs/ocfs2/dlm/dlmcommon.h 2005-06-07 00:22:16 UTC (rev 2371)
+++ trunk/fs/ocfs2/dlm/dlmcommon.h 2005-06-07 00:46:58 UTC (rev 2372)
@@ -27,7 +27,7 @@
#include <linux/kref.h>
-#define DLM_ASSERT(x) ({ if (!(x)) { printk("assert failed! %s:%d\n", __FILE__, __LINE__); BUG(); } })
+#define DLM_ASSERT(x) BUG_ON(!(x))
#define DLM_HB_NODE_DOWN_PRI (0xf000000)
@@ -61,7 +61,7 @@
static inline int dlm_is_recovery_lock(const char *lock_name, int name_len)
{
if (name_len == DLM_RECOVERY_LOCK_NAME_LEN &&
- strncmp(lock_name, DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN)==0)
+ memcmp(lock_name, DLM_RECOVERY_LOCK_NAME, name_len)==0)
return 1;
return 0;
}
@@ -180,8 +180,6 @@
static inline void dlm_init_work_item(dlm_ctxt *dlm, dlm_work_item *i,
dlm_workfunc_t *f, void *data)
{
- DLM_ASSERT(i);
- DLM_ASSERT(f);
memset(i, 0, sizeof(dlm_work_item));
i->func = f;
INIT_LIST_HEAD(&i->list);
@@ -425,10 +423,36 @@
#define DLM_MRES_MIGRATION 0x02
#define DLM_MRES_ALL_DONE 0x04
-// NET_MAX_PAYLOAD_BYTES is roughly 4080
-// 240 * 16 = 3840
-// 3840 + 112 = 3952 bytes
-// leaves us about 128 bytes
+/*
+ * We would like to get one whole lockres into a single network
+ * message whenever possible. Generally speaking, there will be
+ * at most one dlm_lock on a lockres for each node in the cluster,
+ * plus (infrequently) any additional locks coming in from userdlm.
+ *
+ * struct _dlm_lockres_page
+ * {
+ * dlm_migratable_lockres mres;
+ * dlm_migratable_lock ml[DLM_MAX_MIGRATABLE_LOCKS];
+ * u8 pad[DLM_MIG_LOCKRES_RESERVED];
+ * };
+ *
+ * from ../cluster/tcp.h
+ * NET_MAX_PAYLOAD_BYTES (4096 - sizeof(net_msg))
+ * (roughly 4080 bytes)
+ * and sizeof(dlm_migratable_lockres) = 112 bytes
+ * and sizeof(dlm_migratable_lock) = 16 bytes
+ *
+ * Choosing DLM_MAX_MIGRATABLE_LOCKS=240 and
+ * DLM_MIG_LOCKRES_RESERVED=128 means we have this:
+ *
+ * (DLM_MAX_MIGRATABLE_LOCKS * sizeof(dlm_migratable_lock)) +
+ * sizeof(dlm_migratable_lockres) + DLM_MIG_LOCKRES_RESERVED =
+ * NET_MAX_PAYLOAD_BYTES
+ * (240 * 16) + 112 + 128 = 4080
+ *
+ * So a lockres would need more than 240 locks before it would
+ * use more than one network packet to recover. Not too bad.
+ */
#define DLM_MAX_MIGRATABLE_LOCKS 240
typedef struct _dlm_migratable_lockres
@@ -451,6 +475,11 @@
(sizeof(dlm_migratable_lock) * \
DLM_MAX_MIGRATABLE_LOCKS) )
+/* from above, 128 bytes
+ * for some undetermined future use */
+#define DLM_MIG_LOCKRES_RESERVED (NET_MAX_PAYLOAD_BYTES - \
+ DLM_MIG_LOCKRES_MAX_LEN)
+
typedef struct _dlm_create_lock
{
u64 cookie;
@@ -925,13 +954,9 @@
void dlm_clean_master_list(dlm_ctxt *dlm, u8 dead_node);
-#define DLM_MLE_DEBUG 1
+int dlm_dump_all_mles(const char __user *data, unsigned int len);
-#ifdef DLM_MLE_DEBUG
-void dlm_dump_all_mles(const char __user *data, int len);
-#endif
-
static inline const char * dlm_lock_mode_name(int mode)
{
switch (mode) {
@@ -990,12 +1015,12 @@
if (mle->type == DLM_MLE_BLOCK ||
mle->type == DLM_MLE_MIGRATION) {
if (namelen != mle->u.name.len ||
- strncmp(name, mle->u.name.name, namelen)!=0)
+ memcmp(name, mle->u.name.name, namelen)!=0)
return 0;
} else {
res = mle->u.res;
if (namelen != res->lockname.len ||
- strncmp(res->lockname.name, name, namelen) != 0)
+ memcmp(res->lockname.name, name, namelen) != 0)
return 0;
}
return 1;
@@ -1020,7 +1045,6 @@
static inline void dlm_node_iter_init(unsigned long *map, dlm_node_iter *iter)
{
- DLM_ASSERT(iter);
memcpy(iter->node_map, map, sizeof(iter->node_map));
iter->curnode = -1;
}
@@ -1028,7 +1052,6 @@
static inline int dlm_node_iter_next(dlm_node_iter *iter)
{
int bit;
- DLM_ASSERT(iter);
bit = find_next_bit(iter->node_map, O2NM_MAX_NODES, iter->curnode+1);
if (bit >= O2NM_MAX_NODES) {
iter->curnode = O2NM_MAX_NODES;
Modified: trunk/fs/ocfs2/dlm/dlmconvert.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmconvert.c 2005-06-07 00:22:16 UTC (rev 2371)
+++ trunk/fs/ocfs2/dlm/dlmconvert.c 2005-06-07 00:46:58 UTC (rev 2372)
@@ -337,7 +337,7 @@
convert.cookie = lock->ml.cookie;
convert.namelen = res->lockname.len;
convert.flags = flags;
- strncpy(convert.name, res->lockname.name, convert.namelen);
+ memcpy(convert.name, res->lockname.name, convert.namelen);
iov[0].iov_len = sizeof(dlm_convert_lock);
iov[0].iov_base = &convert;
@@ -397,7 +397,8 @@
if (!dlm_grab(dlm))
return DLM_REJECTED;
- DLM_ASSERT(dlm_domain_fully_joined(dlm));
+ mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
+ "Domain %s not fully joined!\n", dlm->name);
dlm_convert_lock_to_host(cnv);
Modified: trunk/fs/ocfs2/dlm/dlmdebug.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmdebug.c 2005-06-07 00:22:16 UTC (rev 2371)
+++ trunk/fs/ocfs2/dlm/dlmdebug.c 2005-06-07 00:46:58 UTC (rev 2372)
@@ -46,12 +46,13 @@
#define MLOG_MASK_PREFIX ML_DLM
#include "cluster/masklog.h"
-static void dlm_dump_all_lock_resources(const char __user *data, int len);
+static int dlm_dump_all_lock_resources(const char __user *data,
+ unsigned int len);
static void dlm_dump_purge_list(dlm_ctxt *dlm);
-static void dlm_dump_all_purge_lists(const char __user *data, int len);
-static void dlm_trigger_migration(const char __user *data, int len);
+static int dlm_dump_all_purge_lists(const char __user *data, unsigned int len);
+static int dlm_trigger_migration(const char __user *data, unsigned int len);
-typedef void (dlm_debug_func_t)(const char __user *data, int len);
+typedef int (dlm_debug_func_t)(const char __user *data, unsigned int len);
typedef struct _dlm_debug_funcs
{
@@ -61,9 +62,7 @@
static dlm_debug_funcs dlm_debug_map[] = {
{ 'r', dlm_dump_all_lock_resources },
-#ifdef DLM_MLE_DEBUG
{ 'm', dlm_dump_all_mles },
-#endif
{ 'p', dlm_dump_all_purge_lists },
{ 'M', dlm_trigger_migration },
};
@@ -76,25 +75,30 @@
int i;
char c;
dlm_debug_func_t *fn;
+ int ret;
- printk("(%p, %p, %u, %lld)\n",
+ mlog(0, "(%p, %p, %u, %lld)\n",
file, buf, (unsigned int)count, (long long)*ppos);
- if (!count)
- return 0;
+ ret = 0;
+ if (count<=0)
+ goto done;
+ ret = -EFAULT;
if (get_user(c, buf))
- return -EFAULT;
+ goto done;
+ ret = count;
for (i=0; i < dlm_debug_map_sz; i++) {
dlm_debug_funcs *d = &dlm_debug_map[i];
if (c == d->key) {
fn = d->func;
if (fn)
- (fn)(buf, count);
- break;
+ ret = (fn)(buf, count);
+ goto done;
}
}
- return count;
+done:
+ return ret;
}
static struct file_operations dlm_debug_operations = {
@@ -109,12 +113,16 @@
entry->proc_fops = &dlm_debug_operations;
}
-static void dlm_dump_all_lock_resources(const char __user *data, int len)
+/* lock resource printing is usually very important (printed
+ * right before a BUG in some cases), but we'd like to be
+ * able to shut it off if needed, hence the KERN_NOTICE level */
+static int dlm_dump_all_lock_resources(const char __user *data,
+ unsigned int len)
{
dlm_ctxt *dlm;
struct list_head *iter;
- printk("dumping ALL dlm state for node %s\n",
+ mlog(ML_NOTICE, "dumping ALL dlm state for node %s\n",
system_utsname.nodename);
spin_lock(&dlm_domain_lock);
list_for_each(iter, &dlm_domains) {
@@ -122,6 +130,7 @@
dlm_dump_lock_resources(dlm);
}
spin_unlock(&dlm_domain_lock);
+ return len;
}
static void dlm_print_one_lock_resource(dlm_lock_resource *res)
@@ -129,35 +138,35 @@
struct list_head *iter2;
dlm_lock *lock;
- printk("lockres: %.*s, owner=%u, state=%u\n",
+ mlog(ML_NOTICE, "lockres: %.*s, owner=%u, state=%u\n",
res->lockname.len, res->lockname.name,
res->owner, res->state);
spin_lock(&res->spinlock);
- printk(" granted queue: \n");
+ mlog(ML_NOTICE, " granted queue: \n");
list_for_each(iter2, &res->granted) {
lock = list_entry(iter2, dlm_lock, list);
spin_lock(&lock->spinlock);
- printk(" type=%d, conv=%d, node=%u, "
+ mlog(ML_NOTICE, " type=%d, conv=%d, node=%u, "
"cookie=%"MLFu64"\n", lock->ml.type,
lock->ml.convert_type, lock->ml.node,
lock->ml.cookie);
spin_unlock(&lock->spinlock);
}
- printk(" converting queue: \n");
+ mlog(ML_NOTICE, " converting queue: \n");
list_for_each(iter2, &res->converting) {
lock = list_entry(iter2, dlm_lock, list);
spin_lock(&lock->spinlock);
- printk(" type=%d, conv=%d, node=%u, "
+ mlog(ML_NOTICE, " type=%d, conv=%d, node=%u, "
"cookie=%"MLFu64"\n", lock->ml.type,
lock->ml.convert_type, lock->ml.node,
lock->ml.cookie);
spin_unlock(&lock->spinlock);
}
- printk(" blocked queue: \n");
+ mlog(ML_NOTICE, " blocked queue: \n");
list_for_each(iter2, &res->blocked) {
lock = list_entry(iter2, dlm_lock, list);
spin_lock(&lock->spinlock);
- printk(" type=%d, conv=%d, node=%u, "
+ mlog(ML_NOTICE, " type=%d, conv=%d, node=%u, "
"cookie=%"MLFu64"\n", lock->ml.type,
lock->ml.convert_type, lock->ml.node,
lock->ml.cookie);
@@ -179,9 +188,8 @@
struct list_head *bucket;
int i;
- printk("dlm_ctxt: %s, node=%u, key=%u\n",
+ mlog(ML_NOTICE, "dlm_ctxt: %s, node=%u, key=%u\n",
dlm->name, dlm->node_num, dlm->key);
- printk("some bug here... should not have to check for this...\n");
if (!dlm || !dlm->name) {
mlog(ML_ERROR, "dlm=%p\n", dlm);
return;
@@ -203,22 +211,22 @@
struct list_head *iter;
dlm_lock_resource *lockres;
- printk("Purge list for DLM Domain \"%s\"\n", dlm->name);
- printk("Last_used\tName\n");
+ mlog(ML_NOTICE, "Purge list for DLM Domain \"%s\"\n", dlm->name);
+ mlog(ML_NOTICE, "Last_used\tName\n");
spin_lock(&dlm->spinlock);
list_for_each(iter, &dlm->purge_list) {
lockres = list_entry(iter, dlm_lock_resource, purge);
spin_lock(&lockres->spinlock);
- printk("%lu\t%.*s\n", lockres->last_used,
+ mlog(ML_NOTICE, "%lu\t%.*s\n", lockres->last_used,
lockres->lockname.len, lockres->lockname.name);
spin_unlock(&lockres->spinlock);
}
spin_unlock(&dlm->spinlock);
}
-static void dlm_dump_all_purge_lists(const char __user *data, int len)
+static int dlm_dump_all_purge_lists(const char __user *data, unsigned int len)
{
dlm_ctxt *dlm;
struct list_head *iter;
@@ -229,28 +237,32 @@
dlm_dump_purge_list(dlm);
}
spin_unlock(&dlm_domain_lock);
+ return len;
}
-static void dlm_trigger_migration(const char __user *data, int len)
+static int dlm_trigger_migration(const char __user *data, unsigned int len)
{
dlm_lock_resource *res;
dlm_ctxt *dlm;
char *resname;
char *domainname;
char *tmp, *buf = NULL;
+ int ret = -EINVAL;
+ int tmpret;
- if (len >= PAGE_SIZE) {
- mlog(0, "user passed too much data: %d bytes\n", len);
- return;
+ if (len >= PAGE_SIZE-1) {
+ mlog(ML_ERROR, "user passed too much data: %d bytes\n", len);
+ goto leave;
}
if (len < 5) {
- mlog(0, "user passed too little data: %d bytes\n", len);
- return;
+ mlog(ML_ERROR, "user passed too little data: %d bytes\n", len);
+ goto leave;
}
buf = kmalloc(len+1, GFP_KERNEL);
if (!buf) {
- mlog(ML_ERROR, "could not alloc %d bytes\n", len);
- return;
+ mlog(ML_ERROR, "could not alloc %d bytes\n", len+1);
+ ret = -ENOMEM;
+ goto leave;
}
if (strncpy_from_user(buf, data, len) < len) {
mlog(ML_ERROR, "failed to get all user data. done.\n");
@@ -295,7 +307,7 @@
tmp++;
}
- printk("now looking up domain %s, lockres %s\n",
+ mlog(0, "now looking up domain %s, lockres %s\n",
domainname, resname);
spin_lock(&dlm_domain_lock);
dlm = __dlm_lookup_domain(domainname);
@@ -306,7 +318,7 @@
goto leave;
}
- res = dlm_lookup_lockres(dlm, resname, strlen(resname));
+ res = dlm_lookup_lockres(dlm, resname, strlen(resname));
if (!res) {
mlog(ML_ERROR, "bad lockres!\n");
dlm_put(dlm);
@@ -314,16 +326,19 @@
}
mlog(0, "found dlm=%p, lockres=%p\n", dlm, res);
- {
- int ret;
- ret = dlm_migrate_lockres(dlm, res, O2NM_MAX_NODES);
- printk("dlm_migrate_lockres returned %d\n", ret);
- }
+ tmpret = dlm_migrate_lockres(dlm, res, O2NM_MAX_NODES);
+ mlog(0, "dlm_migrate_lockres returned %d\n", tmpret);
+ if (tmpret < 0)
+ mlog(ML_ERROR, "failed to migrate %s: %d\n",
+ resname, tmpret);
dlm_lockres_put(res);
dlm_put(dlm);
+ ret = len;
leave:
- kfree(buf);
+ if (buf)
+ kfree(buf);
+ return ret;
}
static const char *dlm_errnames[] = {
@@ -437,7 +452,7 @@
{
int err;
for (err=DLM_NORMAL-1; err<=DLM_MAXSTATS+1; err++) {
- printk("(error %d) %s: %s\n", err,
+ mlog(0, "(error %d) %s: %s\n", err,
dlm_errname(err), dlm_errmsg(err));
}
}
Modified: trunk/fs/ocfs2/dlm/dlmdomain.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmdomain.c 2005-06-07 00:22:16 UTC (rev 2371)
+++ trunk/fs/ocfs2/dlm/dlmdomain.c 2005-06-07 00:46:58 UTC (rev 2372)
@@ -107,8 +107,6 @@
dlm_lock_resource *tmpres=NULL;
struct list_head *bucket;
- BUG_ON(!name);
-
mlog_entry("%.*s\n", len, name);
assert_spin_locked(&dlm->spinlock);
@@ -121,7 +119,7 @@
list_for_each(iter, bucket) {
tmpres = list_entry(iter, dlm_lock_resource, list);
if (tmpres->lockname.len == len &&
- strncmp(tmpres->lockname.name, name, len) == 0) {
+ memcmp(tmpres->lockname.name, name, len) == 0) {
dlm_lockres_get(tmpres);
break;
}
@@ -137,8 +135,6 @@
{
dlm_lock_resource *res;
- BUG_ON(!dlm);
-
spin_lock(&dlm->spinlock);
res = __dlm_lookup_lockres(dlm, name, len);
spin_unlock(&dlm->spinlock);
@@ -152,9 +148,12 @@
assert_spin_locked(&dlm_domain_lock);
+ /* tmp->name here is always NULL terminated,
+ * but domain may not be! */
list_for_each(iter, &dlm_domains) {
tmp = list_entry (iter, dlm_ctxt, list);
- if (strncmp(tmp->name, domain, len)==0)
+ if (strlen(tmp->name) == len &&
+ memcmp(tmp->name, domain, len)==0)
break;
tmp = NULL;
}
@@ -193,8 +192,6 @@
static void dlm_free_ctxt_mem(dlm_ctxt *dlm)
{
- BUG_ON(!dlm);
-
if (dlm->resources)
free_page((unsigned long) dlm->resources);
@@ -211,8 +208,6 @@
{
dlm_ctxt *dlm;
- BUG_ON(!kref);
-
dlm = container_of(kref, dlm_ctxt, dlm_refs);
BUG_ON(dlm->num_joins);
@@ -234,8 +229,6 @@
void dlm_put(dlm_ctxt *dlm)
{
- BUG_ON(!dlm);
-
spin_lock(&dlm_domain_lock);
kref_put(&dlm->dlm_refs, dlm_ctxt_release);
spin_unlock(&dlm_domain_lock);
@@ -274,8 +267,6 @@
void dlm_get(dlm_ctxt *dlm)
{
- BUG_ON(!dlm);
-
spin_lock(&dlm_domain_lock);
__dlm_get(dlm);
spin_unlock(&dlm_domain_lock);
@@ -387,10 +378,8 @@
mlog(ML_NOTICE, "Nodes in my domain (\"%s\"):\n", dlm->name);
- while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, node + 1))
- != -1) {
- if (node >= O2NM_MAX_NODES)
- break;
+ while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
+ node + 1)) < O2NM_MAX_NODES) {
mlog(ML_NOTICE, " node %d\n", node);
}
}
@@ -458,11 +447,8 @@
spin_lock(&dlm->spinlock);
/* Clear ourselves from the domain map */
clear_bit(dlm->node_num, dlm->domain_map);
- while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0))
- != -1) {
- if (node >= O2NM_MAX_NODES)
- break;
-
+ while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
+ 0)) < O2NM_MAX_NODES) {
/* Drop the dlm spinlock. This is safe wrt the domain_map.
* -nodes cannot be added now as the
* query_join_handlers knows to respond with OK_NO_MAP
@@ -514,8 +500,6 @@
{
int leave = 0;
- BUG_ON(!dlm);
-
spin_lock(&dlm_domain_lock);
BUG_ON(dlm->dlm_state != DLM_CTXT_JOINED);
BUG_ON(!dlm->num_joins);
@@ -667,7 +651,7 @@
memset(&cancel_msg, 0, sizeof(cancel_msg));
cancel_msg.node_idx = dlm->node_num;
cancel_msg.name_len = strlen(dlm->name);
- strncpy(cancel_msg.domain, dlm->name, cancel_msg.name_len);
+ memcpy(cancel_msg.domain, dlm->name, cancel_msg.name_len);
dlm_cancel_join_to_net(&cancel_msg);
@@ -696,11 +680,8 @@
status = 0;
node = -1;
- while ((node = find_next_bit(node_map, O2NM_MAX_NODES, node + 1))
- != -1) {
- if (node >= O2NM_MAX_NODES)
- break;
-
+ while ((node = find_next_bit(node_map, O2NM_MAX_NODES,
+ node + 1)) < O2NM_MAX_NODES) {
if (node == dlm->node_num)
continue;
@@ -728,7 +709,7 @@
memset(&join_msg, 0, sizeof(join_msg));
join_msg.node_idx = dlm->node_num;
join_msg.name_len = strlen(dlm->name);
- strncpy(join_msg.domain, dlm->name, join_msg.name_len);
+ memcpy(join_msg.domain, dlm->name, join_msg.name_len);
dlm_query_join_request_to_net(&join_msg);
@@ -778,7 +759,7 @@
memset(&assert_msg, 0, sizeof(assert_msg));
assert_msg.node_idx = dlm->node_num;
assert_msg.name_len = strlen(dlm->name);
- strncpy(assert_msg.domain, dlm->name, assert_msg.name_len);
+ memcpy(assert_msg.domain, dlm->name, assert_msg.name_len);
dlm_assert_joined_to_net(&assert_msg);
@@ -798,11 +779,8 @@
status = 0;
node = -1;
- while ((node = find_next_bit(node_map, O2NM_MAX_NODES, node + 1))
- != -1) {
- if (node >= O2NM_MAX_NODES)
- break;
-
+ while ((node = find_next_bit(node_map, O2NM_MAX_NODES,
+ node + 1)) < O2NM_MAX_NODES) {
if (node == dlm->node_num)
continue;
@@ -865,13 +843,12 @@
mlog_entry("%p", dlm);
- ctxt = kmalloc(sizeof(struct domain_join_ctxt), GFP_KERNEL);
+ ctxt = kcalloc(1, sizeof(*ctxt), GFP_KERNEL);
if (!ctxt) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
- memset(ctxt, 0, sizeof(*ctxt));
/* group sem locking should work for us here -- we're already
* registered for heartbeat events so filling this should be
@@ -886,11 +863,8 @@
spin_unlock(&dlm->spinlock);
node = -1;
- while ((node = find_next_bit(ctxt->live_map, O2NM_MAX_NODES, node + 1))
- != -1) {
- if (node >= O2NM_MAX_NODES)
- break;
-
+ while ((node = find_next_bit(ctxt->live_map, O2NM_MAX_NODES,
+ node + 1)) < O2NM_MAX_NODES) {
if (node == dlm->node_num)
continue;
@@ -1147,12 +1121,11 @@
int i;
dlm_ctxt *dlm = NULL;
- dlm = kmalloc(sizeof(dlm_ctxt), GFP_KERNEL);
+ dlm = kcalloc(1, sizeof(*dlm), GFP_KERNEL);
if (!dlm) {
mlog_errno(-ENOMEM);
goto leave;
}
- memset(dlm, 0, sizeof(dlm_ctxt));
dlm->name = kmalloc(strlen(domain) + 1, GFP_KERNEL);
if (dlm->name == NULL) {
@@ -1313,7 +1286,6 @@
}
EXPORT_SYMBOL_GPL(dlm_register_domain);
-
static LIST_HEAD(dlm_join_handlers);
static void dlm_unregister_net_handlers(void)
Modified: trunk/fs/ocfs2/dlm/dlmlock.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmlock.c 2005-06-07 00:22:16 UTC (rev 2371)
+++ trunk/fs/ocfs2/dlm/dlmlock.c 2005-06-07 00:46:58 UTC (rev 2372)
@@ -104,11 +104,6 @@
int call_ast = 0, kick_thread = 0;
dlm_status status = DLM_NORMAL;
- DLM_ASSERT(lock);
- DLM_ASSERT(res);
- DLM_ASSERT(dlm);
- DLM_ASSERT(lock->lksb);
-
mlog_entry("type=%d\n", lock->ml.type);
spin_lock(&res->spinlock);
@@ -241,7 +236,7 @@
create.cookie = lock->ml.cookie;
create.namelen = res->lockname.len;
create.flags = flags;
- strncpy(create.name, res->lockname.name, create.namelen);
+ memcpy(create.name, res->lockname.name, create.namelen);
dlm_create_lock_to_net(&create);
tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create,
@@ -272,11 +267,9 @@
dlm_lock *lock;
dlm_lockstatus *lksb;
- DLM_ASSERT(kref);
lock = container_of(kref, dlm_lock, lock_refs);
lksb = lock->lksb;
- DLM_ASSERT(lksb);
DLM_ASSERT(lksb->lockid == lock);
DLM_ASSERT(list_empty(&lock->list));
DLM_ASSERT(list_empty(&lock->ast_list));
@@ -299,8 +292,6 @@
/* associate a lock with it's lockres, getting a ref on the lockres */
void dlm_lock_attach_lockres(dlm_lock *lock, dlm_lock_resource *res)
{
- DLM_ASSERT(lock);
- DLM_ASSERT(res);
dlm_lockres_get(res);
lock->lockres = res;
}
@@ -310,7 +301,6 @@
{
dlm_lock_resource *res;
- DLM_ASSERT(lock);
res = lock->lockres;
if (res) {
lock->lockres = NULL;
@@ -329,6 +319,9 @@
newlock->ml.convert_type = LKM_IVMODE;
newlock->ml.highest_blocked = LKM_IVMODE;
newlock->ml.node = node;
+ newlock->ml.pad1 = 0;
+ newlock->ml.list = 0;
+ newlock->ml.flags = 0;
newlock->ast = NULL;
newlock->bast = NULL;
newlock->astdata = NULL;
@@ -342,20 +335,17 @@
{
dlm_lock *lock;
- lock = kmalloc(sizeof(dlm_lock), GFP_KERNEL);
+ lock = kcalloc(1, sizeof(*lock), GFP_KERNEL);
if (!lock)
return NULL;
- memset(lock, 0, sizeof(dlm_lock));
-
if (!lksb) {
- lksb = kmalloc(sizeof(dlm_lockstatus), GFP_KERNEL);
+ /* zero memory only if kernel-allocated */
+ lksb = kcalloc(1, sizeof(*lksb), GFP_KERNEL);
if (!lksb) {
kfree(lock);
return NULL;
}
- /* memset only if kernel-allocated */
- memset(lksb, 0, sizeof(dlm_lockstatus));
lksb->flags |= DLM_LKSB_KERNEL_ALLOCATED;
}
@@ -390,7 +380,8 @@
if (!dlm_grab(dlm))
return DLM_REJECTED;
- DLM_ASSERT(dlm_domain_fully_joined(dlm));
+ mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
+ "Domain %s not fully joined!\n", dlm->name);
dlm_create_lock_to_host(create);
name = create->name;
Modified: trunk/fs/ocfs2/dlm/dlmmaster.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmmaster.c 2005-06-07 00:22:16 UTC (rev 2371)
+++ trunk/fs/ocfs2/dlm/dlmmaster.c 2005-06-07 00:46:58 UTC (rev 2372)
@@ -51,7 +51,6 @@
#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
#include "cluster/masklog.h"
-#ifdef DLM_MLE_DEBUG
static void dlm_dump_mles(dlm_ctxt *dlm);
static void dlm_dump_mles(dlm_ctxt *dlm)
@@ -65,8 +64,8 @@
unsigned int namelen;
const char *name;
- printk("dumping all mles for domain %s:\n", dlm->name);
- printk(" ####: type refs owner events? err? lockname\n");
+ mlog(ML_NOTICE, "dumping all mles for domain %s:\n", dlm->name);
+ mlog(ML_NOTICE, " ####: type refs owner events? err? lockname\n");
spin_lock(&dlm->master_lock);
list_for_each(iter, &dlm->master_list) {
@@ -88,7 +87,7 @@
name = mle->u.res->lockname.name;
}
- printk(" #%3d: %3s %3d %3u %c %c (%d)%.*s\n",
+ mlog(ML_NOTICE, " #%3d: %3s %3d %3u %c %c (%d)%.*s\n",
i, type, refs, master, attached, err,
namelen, namelen, name);
}
@@ -99,7 +98,7 @@
extern spinlock_t dlm_domain_lock;
extern struct list_head dlm_domains;
-void dlm_dump_all_mles(const char __user *data, int len)
+int dlm_dump_all_mles(const char __user *data, unsigned int len)
{
struct list_head *iter;
dlm_ctxt *dlm;
@@ -107,13 +106,13 @@
spin_lock(&dlm_domain_lock);
list_for_each(iter, &dlm_domains) {
dlm = list_entry (iter, dlm_ctxt, list);
- printk("found dlm: %p, name=%s\n", dlm, dlm->name);
+ mlog(ML_NOTICE, "found dlm: %p, name=%s\n", dlm, dlm->name);
dlm_dump_mles(dlm);
}
spin_unlock(&dlm_domain_lock);
+ return len;
}
EXPORT_SYMBOL_GPL(dlm_dump_all_mles);
-#endif
static kmem_cache_t *dlm_mle_cache = NULL;
@@ -202,8 +201,6 @@
static void __dlm_put_mle(dlm_master_list_entry *mle)
{
dlm_ctxt *dlm;
- DLM_ASSERT(mle);
- DLM_ASSERT(mle->dlm);
dlm = mle->dlm;
assert_spin_locked(&dlm->spinlock);
@@ -217,8 +214,6 @@
static void dlm_put_mle(dlm_master_list_entry *mle)
{
dlm_ctxt *dlm;
- DLM_ASSERT(mle);
- DLM_ASSERT(mle->dlm);
dlm = mle->dlm;
spin_lock(&dlm->spinlock);
@@ -261,11 +256,11 @@
mle->u.res = res;
} else if (mle->type == DLM_MLE_BLOCK) {
DLM_ASSERT(name);
- strncpy(mle->u.name.name, name, namelen);
+ memcpy(mle->u.name.name, name, namelen);
mle->u.name.len = namelen;
} else /* DLM_MLE_MIGRATION */ {
DLM_ASSERT(name);
- strncpy(mle->u.name.name, name, namelen);
+ memcpy(mle->u.name.name, name, namelen);
mle->u.name.len = namelen;
}
@@ -304,9 +299,6 @@
void dlm_mle_node_down(dlm_ctxt *dlm, dlm_master_list_entry *mle,
struct o2nm_node *node, int idx)
{
- DLM_ASSERT(mle);
- DLM_ASSERT(dlm);
-
spin_lock(&mle->spinlock);
if (!test_bit(idx, mle->node_map))
@@ -326,9 +318,6 @@
void dlm_mle_node_up(dlm_ctxt *dlm, dlm_master_list_entry *mle,
struct o2nm_node *node, int idx)
{
- DLM_ASSERT(mle);
- DLM_ASSERT(dlm);
-
spin_lock(&mle->spinlock);
#if 0
@@ -372,11 +361,7 @@
mlog_entry_void();
- DLM_ASSERT(kref);
-
mle = container_of(kref, dlm_master_list_entry, mle_refs);
-
- DLM_ASSERT(mle->dlm);
dlm = mle->dlm;
if (mle->type == DLM_MLE_BLOCK) {
@@ -447,8 +432,6 @@
{
dlm_lock_resource *res;
- BUG_ON(!kref);
-
res = container_of(kref, dlm_lock_resource, refs);
/* This should not happen -- all lockres' have a name
@@ -493,7 +476,7 @@
* correctly! */
qname = (char *) res->lockname.name;
- strncpy(qname, name, namelen);
+ memcpy(qname, name, namelen);
res->lockname.len = namelen;
res->lockname.hash = full_name_hash(name, namelen);
@@ -572,7 +555,6 @@
unsigned int namelen;
BUG_ON(!lockid);
- BUG_ON(!dlm);
namelen = strlen(lockid);
@@ -743,7 +725,7 @@
sizeof(mle->vote_map)) != 0);
voting_done = (memcmp(mle->vote_map, mle->response_map,
sizeof(mle->vote_map)) == 0);
-
+
/* restart if we hit any errors */
if (mle->error || map_changed) {
if (mle->error) {
@@ -855,10 +837,10 @@
if (mle->type == DLM_MLE_BLOCK) {
request.namelen = mle->u.name.len;
- strncpy(request.name, mle->u.name.name, request.namelen);
+ memcpy(request.name, mle->u.name.name, request.namelen);
} else {
request.namelen = mle->u.res->lockname.len;
- strncpy(request.name, mle->u.res->lockname.name,
+ memcpy(request.name, mle->u.res->lockname.name,
request.namelen);
}
@@ -1143,7 +1125,7 @@
memset(&assert, 0, sizeof(assert));
assert.node_idx = dlm->node_num;
assert.namelen = namelen;
- strncpy(assert.name, lockname, namelen);
+ memcpy(assert.name, lockname, namelen);
assert.flags = flags;
dlm_assert_master_to_net(&assert);
@@ -1330,13 +1312,12 @@
int ignore_higher, u8 request_from, u32 flags)
{
dlm_work_item *item;
- item = (dlm_work_item *)kmalloc(sizeof(dlm_work_item), GFP_KERNEL);
+ item = kcalloc(1, sizeof(*item), GFP_KERNEL);
if (!item)
return -ENOMEM;
/* queue up work for dlm_assert_master_worker */
- memset(item, 0, sizeof(dlm_work_item));
dlm_grab(dlm); /* get an extra ref for the work item */
dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
item->u.am.lockres = res; /* already have a ref */
@@ -1364,13 +1345,8 @@
u8 request_from;
u32 flags;
- DLM_ASSERT(item);
dlm = item->dlm;
- DLM_ASSERT(dlm);
-
res = item->u.am.lockres;
- DLM_ASSERT(res);
-
ignore_higher = item->u.am.ignore_higher;
request_from = item->u.am.request_from;
flags = item->u.am.flags;
@@ -1458,7 +1434,6 @@
for (i=0; i<3; i++) {
list_for_each(iter, queue) {
lock = list_entry (iter, dlm_lock, list);
- DLM_ASSERT(lock);
empty = 0;
if (lock->ml.node == dlm->node_num) {
mlog(0, "found a lock owned by this node "
@@ -1706,7 +1681,6 @@
for (i=0; i<3; i++) {
list_for_each_safe(iter, iter2, queue) {
lock = list_entry (iter, dlm_lock, list);
- DLM_ASSERT(lock);
if (lock->ml.node != dlm->node_num) {
mlog(0, "putting lock for node %u\n",
lock->ml.node);
@@ -1784,7 +1758,7 @@
memset(&migrate, 0, sizeof(migrate));
migrate.namelen = res->lockname.len;
- strncpy(migrate.name, res->lockname.name, migrate.namelen);
+ memcpy(migrate.name, res->lockname.name, migrate.namelen);
migrate.new_master = new_master;
migrate.master = master;
Modified: trunk/fs/ocfs2/dlm/dlmrecovery.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmrecovery.c 2005-06-07 00:22:16 UTC (rev 2371)
+++ trunk/fs/ocfs2/dlm/dlmrecovery.c 2005-06-07 00:46:58 UTC (rev 2372)
@@ -131,22 +131,18 @@
dlm_work_item *item;
dlm_workfunc_t *workfunc;
- DLM_ASSERT(dlm);
-
spin_lock(&dlm->work_lock);
list_splice_init(&dlm->work_list, &tmp_list);
spin_unlock(&dlm->work_lock);
list_for_each_safe(iter, iter2, &tmp_list) {
item = list_entry(iter, dlm_work_item, list);
- DLM_ASSERT(item);
workfunc = item->func;
list_del_init(&item->list);
/* already have ref on dlm to avoid having
* it disappear. just double-check. */
DLM_ASSERT(item->dlm == dlm);
- DLM_ASSERT(workfunc);
/* this is allowed to sleep and
* call network stuff */
@@ -513,12 +509,11 @@
}
DLM_ASSERT(num != dead_node);
- ndata = kmalloc(sizeof(dlm_reco_node_data), GFP_KERNEL);
+ ndata = kcalloc(1, sizeof(*ndata), GFP_KERNEL);
if (!ndata) {
dlm_destroy_recovery_area(dlm, dead_node);
return -ENOMEM;
}
- memset(ndata, 0, sizeof(dlm_reco_node_data));
ndata->node_num = num;
ndata->state = DLM_RECO_NODE_DATA_INIT;
spin_lock(&dlm_reco_state_lock);
@@ -587,10 +582,9 @@
return -EINVAL;
dlm_lock_request_to_host(lr);
- DLM_ASSERT(dlm);
DLM_ASSERT(lr->dead_node == dlm->reco.dead_node);
- item = (dlm_work_item *)kmalloc(sizeof(dlm_work_item), GFP_KERNEL);
+ item = kcalloc(1, sizeof(*item), GFP_KERNEL);
if (!item) {
dlm_put(dlm);
return -ENOMEM;
@@ -605,7 +599,6 @@
}
/* queue up work for dlm_request_all_locks_worker */
- memset(item, 0, sizeof(dlm_work_item));
dlm_grab(dlm); /* get an extra ref for the work item */
dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
item->u.ral.reco_master = lr->node_idx;
@@ -629,17 +622,12 @@
int ret;
u8 dead_node, reco_master;
- /* do a whole s-load of asserts */
- DLM_ASSERT(item);
dlm = item->dlm;
- DLM_ASSERT(dlm);
-
dead_node = item->u.ral.dead_node;
reco_master = item->u.ral.reco_master;
DLM_ASSERT(dead_node == dlm->reco.dead_node);
DLM_ASSERT(reco_master == dlm->reco.new_master);
- DLM_ASSERT(data);
mres = (dlm_migratable_lockres *)data;
/* lock resources should have already been moved to the
@@ -677,12 +665,9 @@
int ret, tmpret;
dlm_reco_data_done *done_msg;
- done_msg = (dlm_reco_data_done *)kmalloc(sizeof(dlm_reco_data_done),
- GFP_KERNEL);
+ done_msg = kcalloc(1, sizeof(*done_msg), GFP_KERNEL);
if (!done_msg)
return -ENOMEM;
-
- memset(done_msg, 0, sizeof(dlm_reco_data_done));
done_msg->node_idx = dlm->node_num;
done_msg->dead_node = dead_node;
dlm_reco_data_done_to_net(done_msg);
@@ -707,7 +692,6 @@
return -EINVAL;
dlm_reco_data_done_to_host(done);
- DLM_ASSERT(dlm);
DLM_ASSERT(done->dead_node == dlm->reco.dead_node);
spin_lock(&dlm_reco_state_lock);
@@ -945,6 +929,9 @@
res, total_locks);
if (ret < 0) {
// TODO
+ mlog(ML_ERROR, "dlm_send_mig_lockres_msg "
+ "returned %d, TODO\n", ret);
+ BUG();
}
}
queue++;
@@ -953,6 +940,9 @@
ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
if (ret < 0) {
// TODO
+ mlog(ML_ERROR, "dlm_send_mig_lockres_msg returned %d, "
+ "TODO\n", ret);
+ BUG();
}
return ret;
}
@@ -1002,7 +992,7 @@
ret = -ENOMEM;
buf = kmalloc(msg->data_len, GFP_KERNEL);
- item = (dlm_work_item *)kmalloc(sizeof(dlm_work_item), GFP_KERNEL);
+ item = kcalloc(1, sizeof(*item), GFP_KERNEL);
if (!buf || !item)
goto leave;
@@ -1074,7 +1064,6 @@
}
/* queue up work for dlm_mig_lockres_worker */
- memset(item, 0, sizeof(dlm_work_item));
dlm_grab(dlm); /* get an extra ref for the work item */
memcpy(buf, msg->buf, msg->data_len); /* copy the whole message */
dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
@@ -1107,15 +1096,10 @@
dlm_lock_resource *res;
u8 real_master;
- DLM_ASSERT(item);
dlm = item->dlm;
- DLM_ASSERT(dlm);
-
- DLM_ASSERT(data);
mres = (dlm_migratable_lockres *)data;
res = item->u.ml.lockres;
- DLM_ASSERT(res);
real_master = item->u.ml.real_master;
if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
@@ -1224,7 +1208,7 @@
memset(&req, 0, sizeof(req));
req.node_idx = dlm->node_num;
req.namelen = res->lockname.len;
- strncpy(req.name, res->lockname.name, res->lockname.len);
+ memcpy(req.name, res->lockname.name, res->lockname.len);
dlm_master_requery_to_net(&req);
ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
@@ -1288,7 +1272,6 @@
int list_num)
{
struct list_head *ret;
- DLM_ASSERT(res);
DLM_ASSERT(list_num >= 0);
DLM_ASSERT(list_num <= 2);
ret = &(res->granted);
Modified: trunk/fs/ocfs2/dlm/dlmthread.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmthread.c 2005-06-07 00:22:16 UTC (rev 2371)
+++ trunk/fs/ocfs2/dlm/dlmthread.c 2005-06-07 00:46:58 UTC (rev 2372)
@@ -259,8 +259,6 @@
struct list_head *head;
int can_grant = 1;
- DLM_ASSERT(res);
-
//mlog(0, "res->lockname.len=%d\n", res->lockname.len);
//mlog(0, "res->lockname.name=%p\n", res->lockname.name);
//mlog(0, "shuffle res %.*s\n", res->lockname.len,
@@ -592,7 +590,7 @@
while (!kthread_should_stop()) {
int n = DLM_THREAD_MAX_DIRTY;
-
+
/* dlm_shutting_down is very point-in-time, but that
* doesn't matter as we'll just loop back around if we
* get false on the leading edge of a state
@@ -601,14 +599,19 @@
down_read(&dlm->recovery_sem);
- /* this will now do the dlm_shuffle_lists
- * while the dlm->spinlock is unlocked */
+ /* We really don't want to hold dlm->spinlock while
+ * calling dlm_shuffle_lists on each lockres that
+ * needs to have its queues adjusted and AST/BASTs
+ * run. So let's pull each entry off the dirty_list
+ * and drop dlm->spinlock ASAP. Once off the list,
+ * res->spinlock needs to be taken again to protect
+ * the queues while calling dlm_shuffle_lists. */
spin_lock(&dlm->spinlock);
while (!list_empty(&dlm->dirty_list)) {
int delay = 0;
res = list_entry(dlm->dirty_list.next,
dlm_lock_resource, dirty);
-
+
/* peel a lockres off, remove it from the list,
* unset the dirty flag and drop the dlm lock */
DLM_ASSERT(res);
@@ -620,6 +623,8 @@
spin_unlock(&res->spinlock);
spin_unlock(&dlm->spinlock);
+ /* lockres can be re-dirtied/re-added to the
+ * dirty_list in this gap, but that is ok */
spin_lock(&res->spinlock);
DLM_ASSERT(!(res->state & DLM_LOCK_RES_MIGRATING));
@@ -675,9 +680,11 @@
dlm_flush_asts(dlm);
up_read(&dlm->recovery_sem);
- /* no need to sleep if we know there is more work to do */
- if (!n)
+ /* yield and continue right away if there is more work to do */
+ if (!n) {
+ yield();
continue;
+ }
wait_event_interruptible_timeout(dlm->dlm_thread_wq,
!dlm_dirty_list_empty(dlm) ||
Modified: trunk/fs/ocfs2/dlm/dlmunlock.c
===================================================================
--- trunk/fs/ocfs2/dlm/dlmunlock.c 2005-06-07 00:22:16 UTC (rev 2371)
+++ trunk/fs/ocfs2/dlm/dlmunlock.c 2005-06-07 00:46:58 UTC (rev 2372)
@@ -72,7 +72,19 @@
u8 owner);
+/*
+ * according to the spec:
+ * http://opendlm.sourceforge.net/cvsmirror/opendlm/docs/dlmbook_final.pdf
+ *
+ * flags & LKM_CANCEL != 0: must be converting or blocked
+ * flags & LKM_CANCEL == 0: must be granted
+ *
+ * So to unlock a converting lock, you must first cancel the
+ * convert (passing LKM_CANCEL in flags), then call the unlock
+ * again (with no LKM_CANCEL in flags).
+ */
+
/*
* locking:
* caller needs: none
@@ -127,14 +139,10 @@
status = DLM_RECOVERING;
goto leave;
}
-
- /* according to spec and opendlm code
- * flags & LKM_CANCEL != 0: must be converting or blocked
- * flags & LKM_CANCEL == 0: must be granted
- * iow, to unlock a converting lock, you must first LKM_CANCEL
- * the convert, then call the unlock again with no LKM_CANCEL
- */
+
+ /* see above for what the spec says about
+ * LKM_CANCEL and the lock queue state */
if (flags & LKM_CANCEL)
status = dlm_get_cancel_actions(dlm, res, lock, lksb, &actions);
else
@@ -260,7 +268,7 @@
unlock.flags = flags;
unlock.cookie = lock->ml.cookie;
unlock.namelen = res->lockname.len;
- strncpy(unlock.name, res->lockname.name, unlock.namelen);
+ memcpy(unlock.name, res->lockname.name, unlock.namelen);
iov[0].iov_len = sizeof(dlm_unlock_lock);
iov[0].iov_base = &unlock;
@@ -338,7 +346,8 @@
if (!dlm_grab(dlm))
return DLM_REJECTED;
- DLM_ASSERT(dlm_domain_fully_joined(dlm));
+ mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
+ "Domain %s not fully joined!\n", dlm->name);
mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : "none");
More information about the Ocfs2-commits
mailing list