[Ocfs2-commits] smushran commits r2806 - branches/ocfs2-1.2-cert/patches

svn-commits at oss.oracle.com
Fri Mar 31 20:18:26 CST 2006


Author: smushran
Signed-off-by: khackel
Date: 2006-03-31 20:18:24 -0600 (Fri, 31 Mar 2006)
New Revision: 2806

Added:
   branches/ocfs2-1.2-cert/patches/dlm-new_proc_entry
   branches/ocfs2-1.2-cert/patches/ocfs2-1.2-no-idr-0.patch
Modified:
   branches/ocfs2-1.2-cert/patches/dlm-mlog_to_printk
   branches/ocfs2-1.2-cert/patches/series
Log:
Added ocfs2-1.2-no-idr-0.patch and dlm-new_proc_entry to the patch series
Signed-off-by: khackel

Modified: branches/ocfs2-1.2-cert/patches/dlm-mlog_to_printk
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-mlog_to_printk	2006-04-01 01:57:26 UTC (rev 2805)
+++ branches/ocfs2-1.2-cert/patches/dlm-mlog_to_printk	2006-04-01 02:18:24 UTC (rev 2806)
@@ -1,7 +1,7 @@
 Index: ocfs2-1.2-cert/fs/ocfs2/dlm/dlmdomain.c
 ===================================================================
 --- ocfs2-1.2-cert.orig/fs/ocfs2/dlm/dlmdomain.c	2006-03-27 14:36:39.314361000 -0800
-+++ ocfs2-1.2-cert/fs/ocfs2/dlm/dlmdomain.c	2006-03-31 15:31:03.547719000 -0800
++++ ocfs2-1.2-cert/fs/ocfs2/dlm/dlmdomain.c	2006-03-31 18:14:08.706300000 -0800
 @@ -381,12 +381,13 @@
  
  	assert_spin_locked(&dlm->spinlock);
@@ -39,7 +39,7 @@
 Index: ocfs2-1.2-cert/fs/ocfs2/super.c
 ===================================================================
 --- ocfs2-1.2-cert.orig/fs/ocfs2/super.c	2006-03-27 14:36:41.121721000 -0800
-+++ ocfs2-1.2-cert/fs/ocfs2/super.c	2006-03-31 15:31:03.571697000 -0800
++++ ocfs2-1.2-cert/fs/ocfs2/super.c	2006-03-31 18:14:08.718300000 -0800
 @@ -641,9 +641,8 @@
  
  	ocfs2_complete_mount_recovery(osb);
@@ -84,7 +84,7 @@
 Index: ocfs2-1.2-cert/fs/ocfs2/slot_map.c
 ===================================================================
 --- ocfs2-1.2-cert.orig/fs/ocfs2/slot_map.c	2006-03-27 14:36:43.062080000 -0800
-+++ ocfs2-1.2-cert/fs/ocfs2/slot_map.c	2006-03-31 15:31:03.582690000 -0800
++++ ocfs2-1.2-cert/fs/ocfs2/slot_map.c	2006-03-31 18:14:08.724300000 -0800
 @@ -264,7 +264,7 @@
  	osb->slot_num = slot;
  	spin_unlock(&si->si_lock);
@@ -96,9 +96,9 @@
  	if (status < 0)
 Index: ocfs2-1.2-cert/fs/ocfs2/cluster/tcp.c
 ===================================================================
---- ocfs2-1.2-cert.orig/fs/ocfs2/cluster/tcp.c	2006-03-31 15:31:03.232846000 -0800
-+++ ocfs2-1.2-cert/fs/ocfs2/cluster/tcp.c	2006-03-31 16:07:42.029687000 -0800
-@@ -410,8 +410,8 @@
+--- ocfs2-1.2-cert.orig/fs/ocfs2/cluster/tcp.c	2006-03-31 18:13:32.141028000 -0800
++++ ocfs2-1.2-cert/fs/ocfs2/cluster/tcp.c	2006-03-31 18:14:08.739300000 -0800
+@@ -397,8 +397,8 @@
  	}
  
  	if (was_valid && !valid) {
@@ -109,7 +109,7 @@
  		o2net_complete_nodes_nsw(nn);
  	}
  
-@@ -423,10 +423,10 @@
+@@ -410,10 +410,10 @@
  		 * the only way to start connecting again is to down
  		 * heartbeat and bring it back up. */
  		cancel_delayed_work(&nn->nn_connect_expired);
@@ -124,7 +124,7 @@
  	}
  
  	/* trigger the connecting worker func as long as we're not valid,
-@@ -1310,7 +1310,7 @@
+@@ -1294,7 +1294,7 @@
  
  	do_gettimeofday(&now);
  

Added: branches/ocfs2-1.2-cert/patches/dlm-new_proc_entry
===================================================================
--- branches/ocfs2-1.2-cert/patches/dlm-new_proc_entry	2006-04-01 01:57:26 UTC (rev 2805)
+++ branches/ocfs2-1.2-cert/patches/dlm-new_proc_entry	2006-04-01 02:18:24 UTC (rev 2806)
@@ -0,0 +1,110 @@
+Index: ocfs2-1.2-cert/fs/ocfs2/dlm/dlmdebug.c
+===================================================================
+--- ocfs2-1.2-cert.orig/fs/ocfs2/dlm/dlmdebug.c	2006-03-27 14:36:39.586328000 -0800
++++ ocfs2-1.2-cert/fs/ocfs2/dlm/dlmdebug.c	2006-03-31 17:51:22.556100000 -0800
+@@ -58,6 +58,9 @@
+ 					struct dlm_ctxt **dlm,
+ 					struct dlm_lock_resource **res);
+ 
++static int dlm_proc_stats(char *page, char **start, off_t off,
++			  int count, int *eof, void *data);
++
+ typedef int (dlm_debug_func_t)(const char __user *data, unsigned int len);
+ 
+ struct dlm_debug_funcs
+@@ -114,6 +117,8 @@
+ 
+ #define OCFS2_DLM_PROC_PATH "fs/ocfs2_dlm"
+ #define DLM_DEBUG_PROC_NAME "debug"
++#define DLM_STAT_PROC_NAME  "stat"
++
+ static struct proc_dir_entry *ocfs2_dlm_proc;
+ 
+ void dlm_remove_proc(void)
+@@ -140,6 +145,52 @@
+ 		entry->proc_fops = &dlm_debug_operations;
+ }
+ 
++static int dlm_proc_stats(char *page, char **start, off_t off,
++			  int count, int *eof, void *data)
++{
++	int len;
++	struct dlm_ctxt *dlm = data;
++
++	len = sprintf(page, "local=%d, remote=%d, unknown=%d\n",
++		      atomic_read(&dlm->local_resources),
++		      atomic_read(&dlm->remote_resources),
++		      atomic_read(&dlm->unknown_resources));
++
++	if (len <= off + count)
++		*eof = 1;
++
++	*start = page + off;
++	len -= off;
++	if (len > count)
++		len = count;
++	if (len < 0)
++		len = 0;
++
++	return len;
++}
++
++void dlm_proc_add_domain(struct dlm_ctxt *dlm)
++{
++	struct proc_dir_entry *entry;
++
++	dlm->dlm_proc = proc_mkdir(dlm->name, ocfs2_dlm_proc);
++	if (dlm->dlm_proc) {
++		entry = create_proc_read_entry(DLM_STAT_PROC_NAME,
++					       S_IFREG | S_IRUGO, dlm->dlm_proc,
++					       dlm_proc_stats, (char *)dlm);
++		if (entry)
++			entry->owner = THIS_MODULE;
++	}
++}
++
++void dlm_proc_del_domain(struct dlm_ctxt *dlm)
++{
++	if (dlm->dlm_proc) {
++		remove_proc_entry(DLM_STAT_PROC_NAME, dlm->dlm_proc);
++		remove_proc_entry(dlm->name, ocfs2_dlm_proc);
++	}
++}
++
+ /* lock resource printing is usually very important (printed
+  * right before a BUG in some cases), but we'd like to be
+  * able to shut it off if needed, hence the KERN_NOTICE level */
+Index: ocfs2-1.2-cert/fs/ocfs2/dlm/dlmdebug.h
+===================================================================
+--- ocfs2-1.2-cert.orig/fs/ocfs2/dlm/dlmdebug.h	2006-03-27 14:36:39.750164000 -0800
++++ ocfs2-1.2-cert/fs/ocfs2/dlm/dlmdebug.h	2006-03-31 17:20:26.337700000 -0800
+@@ -28,5 +28,7 @@
+ void dlm_remove_proc(void);
+ void dlm_init_proc(void);
+ void dlm_dump_lock_resources(struct dlm_ctxt *dlm);
++void dlm_proc_add_domain(struct dlm_ctxt *dlm);
++void dlm_proc_del_domain(struct dlm_ctxt *dlm);
+ 
+ #endif
+Index: ocfs2-1.2-cert/fs/ocfs2/dlm/dlmdomain.c
+===================================================================
+--- ocfs2-1.2-cert.orig/fs/ocfs2/dlm/dlmdomain.c	2006-03-31 16:30:30.666046000 -0800
++++ ocfs2-1.2-cert/fs/ocfs2/dlm/dlmdomain.c	2006-03-31 17:43:23.458380000 -0800
+@@ -193,6 +193,8 @@
+ 
+ static void dlm_free_ctxt_mem(struct dlm_ctxt *dlm)
+ {
++	dlm_proc_del_domain(dlm);
++
+ 	if (dlm->lockres_hash)
+ 		free_page((unsigned long) dlm->lockres_hash);
+ 
+@@ -1267,6 +1269,8 @@
+ 
+ 	INIT_LIST_HEAD(&dlm->dlm_eviction_callbacks);
+ 
++	dlm_proc_add_domain(dlm);
++
+ 	mlog(0, "context init: refcount %u\n",
+ 		  atomic_read(&dlm->dlm_refs.refcount));
+ 

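Note: the dlm-new_proc_entry patch above exposes per-domain lock resource counters through a read-only proc file, created as /proc/fs/ocfs2_dlm/<domain>/stat (the directory comes from proc_mkdir(dlm->name, ...), the file from DLM_STAT_PROC_NAME). As a rough sketch of how that file could be consumed from userspace -- "mydomain" is a placeholder for a real DLM domain name, and the expected line format is taken from the sprintf() in dlm_proc_stats() -- a reader might look like:

	/* Hypothetical userspace reader for the new proc file; "mydomain"
	 * stands in for an actual DLM domain name. */
	#include <stdio.h>

	int main(void)
	{
		char line[128];
		FILE *fp = fopen("/proc/fs/ocfs2_dlm/mydomain/stat", "r");

		if (!fp) {
			perror("fopen");
			return 1;
		}
		/* Expected format, per dlm_proc_stats():
		 *   local=<n>, remote=<n>, unknown=<n> */
		if (fgets(line, sizeof(line), fp))
			fputs(line, stdout);
		fclose(fp);
		return 0;
	}
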
Added: branches/ocfs2-1.2-cert/patches/ocfs2-1.2-no-idr-0.patch
===================================================================
--- branches/ocfs2-1.2-cert/patches/ocfs2-1.2-no-idr-0.patch	2006-04-01 01:57:26 UTC (rev 2805)
+++ branches/ocfs2-1.2-cert/patches/ocfs2-1.2-no-idr-0.patch	2006-04-01 02:18:24 UTC (rev 2806)
@@ -0,0 +1,260 @@
+Index: fs/ocfs2/cluster/net_proc.c
+===================================================================
+--- fs/ocfs2/cluster/net_proc.c	(revision 2803)
++++ fs/ocfs2/cluster/net_proc.c	(working copy)
+@@ -117,6 +117,7 @@ static int nst_seq_show(struct seq_file 
+ 			   "  process name: %s\n"
+ 			   "  node:         %u\n"
+ 			   "  sc:           %p\n"
++			   "  message id:   %d\n"
+ 			   "  message type: %u\n"
+ 			   "  message key:  0x%08x\n"
+ 			   "  sock acquiry: %lu.%lu\n"
+@@ -125,7 +126,8 @@ static int nst_seq_show(struct seq_file 
+ 			   nst, (unsigned long)nst->st_task->pid,
+ 			   (unsigned long)nst->st_task->tgid,
+ 			   nst->st_task->comm, nst->st_node,
+-			   nst->st_sc, nst->st_msg_type, nst->st_msg_key,
++			   nst->st_sc, nst->st_id, nst->st_msg_type,
++			   nst->st_msg_key,
+ 			   nst->st_sock_time.tv_sec, nst->st_sock_time.tv_usec,
+ 			   nst->st_send_time.tv_sec, nst->st_send_time.tv_usec,
+ 			   nst->st_status_time.tv_sec,
+@@ -253,6 +255,8 @@ static void *sc_seq_next(struct seq_file
+ 	return sc; /* unused, just needs to be null when done */
+ }
+ 
++#define TV_SEC_USEC(TV) TV.tv_sec, TV.tv_usec
++
+ static int sc_seq_show(struct seq_file *seq, void *v)
+ {
+ 	struct o2net_sock_container *sc, *dummy_sc = seq->private;
+@@ -285,11 +289,31 @@ static int sc_seq_show(struct seq_file *
+ 			   "  krefs:           %d\n"
+ 			   "  sock:            %u.%u.%u.%u:%u -> %u.%u.%u.%u:%u\n"
+ 			   "  remote node:     %s\n"
+-			   "  page off:        %zu\n",
+-			   sc, atomic_read(&sc->sc_kref.refcount),
++			   "  page off:        %zu\n"
++			   "  handshake ok:    %u\n"
++			   "  timer:           %lu.%lu\n"
++			   "  data ready:      %lu.%lu\n"
++			   "  advance start:   %lu.%lu\n"
++			   "  advance stop:    %lu.%lu\n"
++			   "  func start:      %lu.%lu\n"
++			   "  func stop:       %lu.%lu\n"
++			   "  func key:        %u\n"
++			   "  func type:       %u\n",
++			   sc,
++			   atomic_read(&sc->sc_kref.refcount),
+ 			   NIPQUAD(saddr), inet ? ntohs(sport) : 0,
+-			   NIPQUAD(daddr), inet ? ntohs(dport) : 0,
+-			   sc->sc_node->nd_name, sc->sc_page_off);
++			   	NIPQUAD(daddr), inet ? ntohs(dport) : 0,
++			   sc->sc_node->nd_name,
++			   sc->sc_page_off,
++			   sc->sc_handshake_ok,
++			   TV_SEC_USEC(sc->sc_tv_timer),
++			   TV_SEC_USEC(sc->sc_tv_data_ready),
++			   TV_SEC_USEC(sc->sc_tv_advance_start),
++			   TV_SEC_USEC(sc->sc_tv_advance_stop),
++			   TV_SEC_USEC(sc->sc_tv_func_start),
++			   TV_SEC_USEC(sc->sc_tv_func_stop),
++			   sc->sc_msg_key,
++			   sc->sc_msg_type);
+ 	}
+ 
+ 
+Index: fs/ocfs2/cluster/tcp.c
+===================================================================
+--- fs/ocfs2/cluster/tcp.c	(revision 2803)
++++ fs/ocfs2/cluster/tcp.c	(working copy)
+@@ -56,7 +56,6 @@
+ #include <linux/kernel.h>
+ #include <linux/jiffies.h>
+ #include <linux/slab.h>
+-#include <linux/idr.h>
+ #include <linux/kref.h>
+ #include <net/tcp.h>
+ 
+@@ -173,39 +172,18 @@ static u8 o2net_num_from_nn(struct o2net
+ 
+ /* ------------------------------------------------------------ */
+ 
+-static int o2net_prep_nsw(struct o2net_node *nn, struct o2net_status_wait *nsw)
++static void o2net_prep_nsw(struct o2net_node *nn, struct o2net_status_wait *nsw)
+ {
+-	int ret = 0;
++	spin_lock(&nn->nn_lock);
+ 
+-	do {
+-		if (!idr_pre_get(&nn->nn_status_idr, GFP_ATOMIC)) {
+-			ret = -EAGAIN;
+-			break;
+-		}
+-		spin_lock(&nn->nn_lock);
+-#ifndef IDR_GET_NEW_RETURNS_ID
+-		ret = idr_get_new(&nn->nn_status_idr, nsw, &nsw->ns_id);
+-#else
+-		/* old semantics */
+-		nsw->ns_id = idr_get_new(&nn->nn_status_idr, nsw);
+-		if (nsw->ns_id < 0)
+-			ret = -EAGAIN;
+-		else
+-			ret = 0;
+-#endif
+-		if (ret == 0)
+-			list_add_tail(&nsw->ns_node_item,
+-				      &nn->nn_status_list);
+-		spin_unlock(&nn->nn_lock);
+-	} while (ret == -EAGAIN);
++	nsw->ns_id = nn->nn_status_next_id++;
++	list_add_tail(&nsw->ns_node_item, &nn->nn_status_list);
+ 
+-	if (ret == 0)  {
+-		init_waitqueue_head(&nsw->ns_wq);
+-		nsw->ns_sys_status = O2NET_ERR_NONE;
+-		nsw->ns_status = 0;
+-	}
++	spin_unlock(&nn->nn_lock);
+ 
+-	return ret;
++	init_waitqueue_head(&nsw->ns_wq);
++	nsw->ns_sys_status = O2NET_ERR_NONE;
++	nsw->ns_status = 0;
+ }
+ 
+ static void o2net_complete_nsw_locked(struct o2net_node *nn,
+@@ -219,31 +197,43 @@ static void o2net_complete_nsw_locked(st
+ 		list_del_init(&nsw->ns_node_item);
+ 		nsw->ns_sys_status = sys_status;
+ 		nsw->ns_status = status;
+-		idr_remove(&nn->nn_status_idr, nsw->ns_id);
+ 		wake_up(&nsw->ns_wq);
+ 	}
+ }
+ 
+-static void o2net_complete_nsw(struct o2net_node *nn,
+-			       struct o2net_status_wait *nsw,
+-			       u64 id, enum o2net_system_error sys_status,
+-			       s32 status)
++static void o2net_complete_nsw_id(struct o2net_node *nn, u32 id,
++				  enum o2net_system_error sys_status,
++				  s32 status)
+ {
++	struct list_head *iter, *tmp;
++	struct o2net_status_wait *nsw;
++	int killed = 0;
++
+ 	spin_lock(&nn->nn_lock);
+-	if (nsw == NULL) {
+-		if (id > INT_MAX)
+-			goto out;
+ 
+-		nsw = idr_find(&nn->nn_status_idr, id);
+-		if (nsw == NULL)
+-			goto out;
++	list_for_each_safe(iter, tmp, &nn->nn_status_list) {
++		nsw = list_entry(iter, struct o2net_status_wait, ns_node_item);
++		if (id == nsw->ns_id) {
++			o2net_complete_nsw_locked(nn, nsw, sys_status, status);
++			killed = 1;
++			break;
++		}
+ 	}
+ 
+-	o2net_complete_nsw_locked(nn, nsw, sys_status, status);
++	spin_unlock(&nn->nn_lock);
+ 
+-out:
++	if (!killed)
++		mlog(ML_ERROR, "didn't find nsw for id %u\n", id);
++}
++
++static void o2net_complete_nsw(struct o2net_node *nn,
++			       struct o2net_status_wait *nsw,
++			       enum o2net_system_error sys_status,
++			       s32 status)
++{
++	spin_lock(&nn->nn_lock);
++	o2net_complete_nsw_locked(nn, nsw, sys_status, status);
+ 	spin_unlock(&nn->nn_lock);
+-	return;
+ }
+ 
+ static void o2net_complete_nodes_nsw(struct o2net_node *nn)
+@@ -951,11 +941,10 @@ int o2net_send_message_vec(u32 msg_type,
+ 	vec[0].iov_base = msg;
+ 	memcpy(&vec[1], caller_vec, caller_veclen * sizeof(struct kvec));
+ 
+-	ret = o2net_prep_nsw(nn, &nsw);
+-	if (ret)
+-		goto out;
++	o2net_prep_nsw(nn, &nsw);
+ 
+ 	msg->msg_num = cpu_to_be32(nsw.ns_id);
++	nst.st_id = nsw.ns_id;
+ 
+ 	do_gettimeofday(&nst.st_send_time);
+ 	/* finally, convert the message header to network byte-order
+@@ -989,7 +978,7 @@ out:
+ 		kfree(vec);
+ 	if (msg)
+ 		kfree(msg);
+-	o2net_complete_nsw(nn, &nsw, 0, 0, 0);
++	o2net_complete_nsw(nn, &nsw, 0, 0);
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(o2net_send_message_vec);
+@@ -1045,10 +1034,9 @@ static int o2net_process_message(struct 
+ 	switch(be16_to_cpu(hdr->magic)) {
+ 		case O2NET_MSG_STATUS_MAGIC:
+ 			/* special type for returning message status */
+-			o2net_complete_nsw(nn, NULL,
+-					   be32_to_cpu(hdr->msg_num),
+-					   be32_to_cpu(hdr->sys_status),
+-					   be32_to_cpu(hdr->status));
++			o2net_complete_nsw_id(nn, be32_to_cpu(hdr->msg_num),
++					      be32_to_cpu(hdr->sys_status),
++					      be32_to_cpu(hdr->status));
+ 			goto out;
+ 		case O2NET_MSG_KEEP_REQ_MAGIC:
+ 			o2net_sendpage(sc, o2net_keep_resp,
+@@ -1865,7 +1853,6 @@ int o2net_init(void)
+ 		/* until we see hb from a node we'll return einval */
+ 		nn->nn_persistent_error = -ENOTCONN;
+ 		init_waitqueue_head(&nn->nn_sc_wq);
+-		idr_init(&nn->nn_status_idr);
+ 		INIT_LIST_HEAD(&nn->nn_status_list);
+ 	}
+ 
+Index: fs/ocfs2/cluster/tcp_internal.h
+===================================================================
+--- fs/ocfs2/cluster/tcp_internal.h	(revision 2803)
++++ fs/ocfs2/cluster/tcp_internal.h	(working copy)
+@@ -62,7 +62,7 @@ struct o2net_node {
+ 	 * or fails or when an accepted socket is attached. */
+ 	wait_queue_head_t		nn_sc_wq;
+ 
+-	struct idr			nn_status_idr;
++	u32				nn_status_next_id;
+ 	struct list_head		nn_status_list;
+ 
+ 	/* connects are attempted from when heartbeat comes up until either hb
+@@ -160,7 +160,7 @@ enum o2net_system_error {
+ struct o2net_status_wait {
+ 	enum o2net_system_error	ns_sys_status;
+ 	s32			ns_status;
+-	int			ns_id;
++	u32			ns_id;
+ 	wait_queue_head_t	ns_wq;
+ 	struct list_head	ns_node_item;
+ };
+@@ -170,6 +170,7 @@ struct o2net_send_tracking {
+ 	struct list_head		st_net_proc_item;
+ 	struct task_struct		*st_task;
+ 	struct o2net_sock_container	*st_sc;
++	u32				st_id;
+ 	u32				st_msg_type;
+ 	u32				st_msg_key;
+ 	u8				st_node;

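Note: the ocfs2-1.2-no-idr-0.patch above drops the idr-based allocation of o2net status-wait ids in favor of a per-node u32 counter (nn_status_next_id) plus a linear walk of nn_status_list when a status reply arrives. A minimal standalone sketch of that counter-plus-list scheme -- the names and the pthread mutex below are illustrative stand-ins for the kernel's nn_lock spinlock and list_head helpers, not the actual o2net code -- would be:

	/* Simplified model of the counter-plus-list scheme the patch
	 * switches to.  All names here are illustrative; the real code
	 * lives in fs/ocfs2/cluster/tcp.c. */
	#include <pthread.h>
	#include <stdint.h>
	#include <stdio.h>

	struct status_wait {
		uint32_t id;
		int status;
		struct status_wait *next;	/* stand-in for list_head */
	};

	static struct status_wait *waits;
	static uint32_t next_id;
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	/* o2net_prep_nsw analogue: assign the next id, queue the waiter. */
	static void prep_wait(struct status_wait *w)
	{
		pthread_mutex_lock(&lock);
		w->id = next_id++;
		w->next = waits;
		waits = w;
		pthread_mutex_unlock(&lock);
	}

	/* o2net_complete_nsw_id analogue: find the waiter by id. */
	static int complete_by_id(uint32_t id, int status)
	{
		struct status_wait **pw, *w;
		int found = 0;

		pthread_mutex_lock(&lock);
		for (pw = &waits; (w = *pw) != NULL; pw = &w->next) {
			if (w->id == id) {
				*pw = w->next;	/* unlink, as list_del_init() would */
				w->status = status;
				found = 1;
				break;
			}
		}
		pthread_mutex_unlock(&lock);

		if (!found)
			fprintf(stderr, "didn't find wait for id %u\n", id);
		return found;
	}

	int main(void)
	{
		struct status_wait a, b;

		prep_wait(&a);
		prep_wait(&b);
		complete_by_id(a.id, 0);
		complete_by_id(b.id, 0);
		return 0;
	}

Built with -pthread, this mirrors the lookup done in o2net_complete_nsw_id(): completion by id becomes a linear scan over the outstanding waits, which the patch trades for removing the linux/idr.h dependency.
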
Modified: branches/ocfs2-1.2-cert/patches/series
===================================================================
--- branches/ocfs2-1.2-cert/patches/series	2006-04-01 01:57:26 UTC (rev 2805)
+++ branches/ocfs2-1.2-cert/patches/series	2006-04-01 02:18:24 UTC (rev 2806)
@@ -21,7 +21,7 @@
 reassert-vs-migration.patch 
 fix-remote-lock-during-reco.patch
 fix-death-during-recovery.patch
-ocfs2-1.2-net-proc-id-0.patch -p0
+ocfs2-1.2-no-idr-0.patch -p0
 dlm-mlog_to_printk 
 remove-bad-spin-unlock.patch 
 continue-finalize-reco.patch
