[Ocfs2-commits] zab commits r1841 - trunk/fs/ocfs2/cluster

svn-commits at oss.oracle.com svn-commits at oss.oracle.com
Mon Jan 24 18:03:06 CST 2005


Author: zab
Date: 2005-01-24 18:03:04 -0600 (Mon, 24 Jan 2005)
New Revision: 1841

Modified:
   trunk/fs/ocfs2/cluster/nodemanager.c
   trunk/fs/ocfs2/cluster/nodemanager.h
   trunk/fs/ocfs2/cluster/tcp.c
Log:
manage sockets more aggressively.  connection establishment uses the sock
callbacks to watch for state changes.  transmitters block on the net_inode_private
until connection completes.  transmit and receive get references when they do
their work.  If they see errors they mark the sock for destruction at the next
final decref.

This still needs to be integrated and debugged when net_inode_private gets
proper accounting.

- add addref and decref functions to get socket references from the nm inode
- attach sockets to n_i_p in one place; net_attach_sock()
- teach the rx thread to release sockets that are just marked for destruction
- pass the socket around instead of the inode.. no more N generic_ip derefs
- more thoroughly avoid racing with teardown in the socket callbacks
- clean up net_sock_drain a little
- patch up the status waiting stuff just enough to stop the bleeding


Modified: trunk/fs/ocfs2/cluster/nodemanager.c
===================================================================
--- trunk/fs/ocfs2/cluster/nodemanager.c	2005-01-24 23:57:37 UTC (rev 1840)
+++ trunk/fs/ocfs2/cluster/nodemanager.c	2005-01-25 00:03:04 UTC (rev 1841)
@@ -503,10 +503,16 @@
 	}
 	memcpy(&n->node, &data->arg_u.node, sizeof(nm_node_info));
 	INIT_LIST_HEAD(&n->ip_hash);
+
+	spin_lock_init(&n->net.sock_lock);
 	n->net.sock = NULL;
+	n->net.sock_refs = 0;
+	n->net.sock_pending = 0;
+	n->net.defer_release = 0;
+	INIT_LIST_HEAD(&n->net.pending_waiters);
+	init_waitqueue_head(&n->net.waitq);
+	INIT_LIST_HEAD(&n->net.handlers);
 	INIT_LIST_HEAD(&n->net.active_item);
-	spin_lock_init(&n->net.sock_lock);
-	n->net.flags = 0;
 	n->net.page = NULL;
 	n->net.page_off = 0;
 

Modified: trunk/fs/ocfs2/cluster/nodemanager.h
===================================================================
--- trunk/fs/ocfs2/cluster/nodemanager.h	2005-01-24 23:57:37 UTC (rev 1840)
+++ trunk/fs/ocfs2/cluster/nodemanager.h	2005-01-25 00:03:04 UTC (rev 1841)
@@ -79,19 +79,27 @@
 
 /* TODO: move this */
 struct sock;
-#define NET_FLAG_CREATING_SOCKET   0x00000001
 typedef struct _net_inode_private
 {
+	/* sockets themselves don't seem to have a nice way to refcount them
+	 * above sock_release.  one could use iget/iput, but that seems
+	 * to interact poory with sock_release() itself calling iput. */
+	spinlock_t		sock_lock;
 	struct socket		*sock;
-	spinlock_t		sock_lock;
+	unsigned long		sock_refs;
+	unsigned		sock_pending:1, /* wait before using ->sock */
+				defer_release:1; /* sock busted,release soon */
+	struct list_head	pending_waiters;
+	wait_queue_head_t	waitq;
+
 	struct list_head	handlers;
 	struct list_head	active_item;
-	int			flags;
 	struct page 		*page;
 	size_t			page_off;
 
+	void			(*orig_state_change)(struct sock *sk);
+	void                    (*orig_error_report)(struct sock *sk);
 	void			(*orig_data_ready)(struct sock *sk, int bytes);
-	void                    (*orig_error_report)(struct sock *sk);
 } net_inode_private;
 
 typedef struct _nm_node_inode_private

Modified: trunk/fs/ocfs2/cluster/tcp.c
===================================================================
--- trunk/fs/ocfs2/cluster/tcp.c	2005-01-24 23:57:37 UTC (rev 1840)
+++ trunk/fs/ocfs2/cluster/tcp.c	2005-01-25 00:03:04 UTC (rev 1841)
@@ -20,27 +20,36 @@
  *
  * ----
  *
- * Callers for this were originally written against a very simple 
- * synchronus API.  This implementation reflects those simple callers.  Some
- * day I'm sure we'll need to move to a more robust posting/callback 
- * mechanism.
+ * Callers for this were originally written against a very simple synchronus
+ * API.  This implementation reflects those simple callers.  Some day I'm sure
+ * we'll need to move to a more robust posting/callback mechanism.
  *
  * Transmit calls pass in kernel virtual addresses and block copying this into
- * the socket's tx buffers via a usual blocking sendmsg.  They'll block
- * waiting for a failed socket to timeout.  TX callers can also pass in
- * a poniter to an 'int' which gets filled with an errno off the wire
- * in response to the message they send.
+ * the socket's tx buffers via a usual blocking sendmsg.  They'll block waiting
+ * for a failed socket to timeout.  TX callers can also pass in a poniter to an
+ * 'int' which gets filled with an errno off the wire in response to the
+ * message they send.
  *
- * Handlers for unsolicited messages are registered.  Each socket has
- * a page that incoming data is copied into.  First the header, then
- * the data.  Handlers are called from only one thread with a reference
- * to this per-socket page.  This page is destroyed after the handler
- * call, so it can't be referenced beyond the call.  Handlers may block
- * but are discouraged from doing so.
+ * Handlers for unsolicited messages are registered.  Each socket has a page
+ * that incoming data is copied into.  First the header, then the data.
+ * Handlers are called from only one thread with a reference to this per-socket
+ * page.  This page is destroyed after the handler call, so it can't be
+ * referenced beyond the call.  Handlers may block but are discouraged from
+ * doing so.
  *
- * Any framing errors (bad magic, unknown message types, large payload
- * lengths) closes a connection.
+ * Any framing errors (bad magic, unknown message types, large payload lengths)
+ * closes a connection.
  *
+ * struct socket pointers live in the net_inode_private structure off of the
+ * node manager inodes.  Only one socket is active under an inode at a time and
+ * callers refcount the socket in members of n_i_p.  While a connect() is
+ * pending transmitters will block on the connect state machine until it
+ * completes.  tx and rx get references to the socket while they're doing their
+ * work.  If they see an error they mark the socket for release at the next
+ * final decref.  sk_error_report doesn't get a reference, it just marks the
+ * socket for release and kicks the rx thread.  This means that new references
+ * can get the known-dead socket and see errors.
+ *
  * One can imagine the direction a more sophisticated API would head in:
  * (there are certainly a half dozen examples in the kernel)
  *   * tx
@@ -57,21 +66,17 @@
  * XXX we should resolve these before release
  * 	- disable preemt before calling rx handler when debugging
  * 	- find explicit stack call to drain rx queue
- * 	- goto out style exiting
- * 	- get sin/iov/msg off the stack, per sock structures
  * 	- add trivial version trading message at the start of a conn
  * 	- go nuts adding static
- * 	- properly life-cycle management is waiting on a more functional
- * 	  setup and teardown facility:
- * 		- implement net_remove_handlers
- * 		- refcounting around sock against tx/teardown/etc
- * 		- make sure ->net.page gets torn down with net_inode_private
- * 		- tear down sockets on exit.. via removing their inodes?
- * 		- simplify rx thread exit path (completion, etc)
+ * 	- move recv_sock into recv_thread
+ * 	- nsc waiting is buggy, should be on socket.. wake w/err if socket dies
+ * 	- compare socks in attach_sock so both size don't close
+ * 	- implement net_remove_handlers
+ * 	- make sure ->net.page gets torn down with net_inode_private
+ * 	- tear down sockets on exit.. via removing their inodes?
+ * 	- simplify rx thread exit path (completion, etc)
  *
  * 	- move gsd into its own file
- * 	- move to userspace connection management?
- * 	
  */
 
 #include <linux/module.h>
@@ -138,6 +143,7 @@
 #define sk_user_data		user_data
 #define sk_data_ready		data_ready
 #define sk_error_report		error_report
+#define sk_state_change		state_change
 #endif
 
 struct socket *recv_sock = NULL;
@@ -165,7 +171,8 @@
 static inline void net_abort_status_return(net_status_ctxt *nsc)
 {
 	spin_lock(&net_status_lock);
-	list_del(&nsc->list);
+	if (!list_empty(&nsc->list))
+		list_del_init(&nsc->list);
 	spin_unlock(&net_status_lock);
 }
 
@@ -177,16 +184,18 @@
 static void __exit net_driver_exit (void);
 static int net_add_handler(net_msg_handler *nmh);
 static void net_remove_handlers(void);
-static void net_dump_and_close_sock(struct socket *sock, struct inode *inode);
 static int net_init_tcp_recv_sock(void);
 static int net_receive_thread(void *data);
 static int net_receive(void);
-static int net_accept_tcp_connections(void);
+static void net_try_accept(void);
 static void net_release_tcp_sock(void);
-static int net_process_message(struct inode *inode, struct socket *sock,
-			       net_msg *hdr);
+static int net_process_message(struct socket *sock, net_msg *hdr);
 static int net_ioctl (struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
 
+static int net_sock_addref_or_connect(struct inode *inode,
+				      struct socket **sock_ret);
+static void net_sock_decref(struct inode *inode, int error);
+
 int gsd_message_action(gsd_message *g);
 int gsd_message_handler(net_msg *msg, u32 len, void *data);
 void gsd_teardown(void);
@@ -467,7 +476,7 @@
        	if (status >= 0 && recv_sock) {
 		add_wait_queue_exclusive(recv_sock->sk->sk_sleep, &main_wait);
 		while (1) {
-			net_accept_tcp_connections();
+			net_try_accept();
 			net_receive();
 
 			wait_event_interruptible(*recv_sock->sk->sk_sleep,
@@ -697,11 +706,8 @@
 	 * on shutdown... */
 }
 
-static int net_recv_tcp_msg(struct inode *inode, struct socket *sock,
-			    void *data, size_t len)
+static int net_recv_tcp_msg(struct socket *sock, void *data, size_t len)
 {
-	nm_node_inode_private *priv;
-	nm_node_info *node;
 	int ret;
 	mm_segment_t oldfs;
 	struct iovec iov = { 
@@ -714,52 +720,24 @@
        		.msg_flags = MSG_DONTWAIT,
 	};
 
-	priv = (nm_node_inode_private *)inode->u.generic_ip;
-	node = &priv->node;
-	if (!sock) {
-		spin_lock(&priv->net.sock_lock); 
-		/* TODO: sock refcounting... i think we can get/put the sk */
-		sock = priv->net.sock;
-		spin_unlock(&priv->net.sock_lock); 
-		if (!sock) {
-			ret = -EINVAL;
-			goto out;
-		}
-	}
-	
 	oldfs = get_fs();
 	set_fs(get_ds());
 	ret = sock_recvmsg(sock, &msg, len, msg.msg_flags);
 	set_fs(oldfs);
 
-out:
 	return ret;
 }
 
-static int net_send_tcp_msg(struct inode *inode, struct socket *sock, 
-			    struct iovec *iov, size_t iovlen, size_t total)
+static int net_send_tcp_msg(struct socket *sock, struct iovec *iov,
+			    size_t iovlen, size_t total)
 {
 	int ret;
-	nm_node_inode_private *priv;
-	nm_node_info *node;
 	mm_segment_t oldfs;
 	struct msghdr msg = {
 		.msg_iov = iov,
 		.msg_iovlen = iovlen,
 	};
 
-	priv = (nm_node_inode_private *)inode->u.generic_ip;
-	node = &priv->node;
-	if (!sock) {
-		spin_lock(&priv->net.sock_lock);
-		/* TODO: sock refcounting... i think we can get/put the sk */
-		sock = priv->net.sock;
-		spin_unlock(&priv->net.sock_lock);
-	}
-
-	netprintk("Sending msg to node=%u, name=%s\n",
-		  node->node_num, node->node_name);
-
 	if (sock == NULL) {
 		ret = -EINVAL;
 		goto out;
@@ -804,20 +782,19 @@
 	net_msg *msg = NULL;
 	net_status_ctxt nsc;
 	wait_queue_t sleep;
-	nm_node_inode_private *priv = NULL;
-	net_inode_private *net = NULL;
 	size_t i, iovlen, caller_bytes = 0;
 	struct iovec *iov = NULL;
+	struct socket *sock = NULL;
 
 	if (!inode || !inode->u.generic_ip) {
 		netprintk0("bad inode, cannot send message\n");
 		ret = -EINVAL;
-		goto done;
+		goto out;
 	}
 	if (caller_iovlen == 0) {
 		netprintk0("bad iovec array length\n");
 		ret = -EINVAL;
-		goto done;
+		goto out;
 	}
 
 	for(i = 0; i < caller_iovlen; i++)
@@ -826,38 +803,27 @@
 	if (caller_bytes > NET_MAX_PAYLOAD_BYTES) {
 		netprintk("total payload len %zu too large\n", caller_bytes);
 		ret = -EINVAL;
-		goto done;
+		goto out;
 	}
 
-	priv = (nm_node_inode_private *)inode->u.generic_ip;
-	net = &priv->net;
-	spin_lock(&net->sock_lock); 
-	if (!net->sock) {
-		spin_unlock(&net->sock_lock);
-		ret = net_init_tcp_sock(inode);
-		if (!(ret == 0 || ret == -EEXIST)) {
-			netprintk0("failed to create socket!\n");
-			ret = -EINVAL;
-			goto done;
-		}
-		spin_lock(&net->sock_lock); 
-	}
-	spin_unlock(&net->sock_lock); 
-	
+	ret = net_sock_addref_or_connect(inode, &sock);
+	if (ret)
+		goto out;
+
 	/* build up our iovec */
 	iovlen = caller_iovlen + 1;
 	iov = kmalloc(sizeof(struct iovec) * iovlen, GFP_KERNEL);
 	if (iov == NULL) {
 		netprintk("failed to %zu element iovec!\n", iovlen);
 		ret = -ENOMEM;
-		goto done;
+		goto out;
 	}
 
 	msg = kmalloc(sizeof(net_msg), GFP_KERNEL);
 	if (!msg) {
 		netprintk("failed to allocate a net_msg!\n");
 		ret = -ENOMEM;
-		goto done;
+		goto out;
 	}
 	memset(msg, 0, sizeof(net_msg));
 	msg->magic = NET_MSG_MAGIC;
@@ -874,30 +840,22 @@
 	if (status) {
 		msg->status = 1;
 
-		INIT_LIST_HEAD(&nsc.list);
 		init_waitqueue_head(&nsc.wq);
 		atomic_set(&nsc.woken, 0);
 		nsc.msg_num = msg->msg_num;
 		nsc.status = 0;
+
+		init_waitqueue_entry(&sleep, current);
+		add_wait_queue(&nsc.wq, &sleep);
+
 		spin_lock(&net_status_lock);
-		list_add(&nsc.list, &net_status_list);
+		list_add_tail(&nsc.list, &net_status_list);
 		spin_unlock(&net_status_lock);
-
-		init_waitqueue_entry(&sleep, current);
-		spin_lock(&net->sock_lock);
-		if (!net->sock) {
-			spin_unlock(&net->sock_lock);
-			netprintk0("caller wanted status return but socket went away!\n");
-			kfree(msg);
-			return -EINVAL;
-		}
-		add_wait_queue(net->sock->sk->sk_sleep, &sleep);
-		spin_unlock(&net->sock_lock); 
 	}
 
 	/* finally, convert the message header to network byte-order and send */
 	net_msg_to_net(msg);
-	ret = net_send_tcp_msg(inode, NULL, iov, iovlen,
+	ret = net_send_tcp_msg(sock, iov, iovlen,
 			       sizeof(net_msg) + caller_bytes);
 	net_msg_to_host(msg);  /* just swapping for printk, its unused now */
 	msgprintk(msg, "sending returned %d\n", ret);
@@ -916,17 +874,19 @@
 				netprintk("status return requested, and error occurred while waiting=%d\n", ret);
 				*status = ret;
 			}
-			remove_wait_queue(recv_sock->sk->sk_sleep, &sleep);
 		} else {
 			netprintk("status return requested, and error returned from net_send_tcp_msg=%d\n", ret);
 			/* return bad status right away */
 			*status = ret;
 		}
+		remove_wait_queue(&nsc.wq, &sleep);
 	} else if (ret < 0) {
 		netprintk("no status return requested, but error returned from net_send_tcp_msg=%d\n", ret);
 	}
 
-done:
+out:
+	if (sock)
+		net_sock_decref(inode, ret);
 	if (iov)
 		kfree(iov);
 	if (msg)
@@ -961,8 +921,7 @@
 }
 EXPORT_SYMBOL(net_send_message);
 
-static int net_send_status_magic(struct inode *inode, struct socket *sock,
-			         net_msg *hdr, int err)
+static int net_send_status_magic(struct socket *sock, net_msg *hdr, int err)
 {
 	struct iovec iov = {
 		.iov_base = hdr,
@@ -978,7 +937,7 @@
 	msgprintk(hdr, "about to send status magic %d\n", err);
 	/* hdr has been in host byteorder this whole time */
 	net_msg_to_net(hdr);
-	return net_send_tcp_msg(inode, sock, &iov, 1, sizeof(net_msg));
+	return net_send_tcp_msg(sock, &iov, 1, sizeof(net_msg));
 }
 
 static inline int net_is_valid_error_type(u32 err_type)
@@ -989,8 +948,7 @@
 	return 0;
 }
 
-static void net_send_error(struct inode *inode, struct socket *sock,
-			   u16 err_type)
+static void net_send_error(struct socket *sock, u16 err_type)
 {
 	net_msg hdr = {
 		.magic        = NET_MSG_MAGIC,
@@ -1009,49 +967,69 @@
 
 	msgprintk(&hdr, "about to send error %u\n", err_type);
 	net_msg_to_net(&hdr);
-	net_send_tcp_msg(inode, sock, &iov, 1, sizeof(net_msg));
+	net_send_tcp_msg(sock, &iov, 1, sizeof(net_msg));
 out:
         return;
 }
 
-static void net_got_sock_callback(net_inode_private *net, struct sock *sk)
+/* net_active_lock must be held */
+static void net_make_active(net_inode_private *net)
 {
-	BUG_ON(net == NULL);
-	BUG_ON(net->sock == NULL);
-	BUG_ON(net->sock->sk != sk);
-
-	spin_lock(&net_active_lock);
 	if (list_empty(&net->active_item))
 		list_add_tail(&net->active_item, &net_active_list);
-	spin_unlock(&net_active_lock);
 
 	if (recv_sock != NULL)
 		wake_up(recv_sock->sk->sk_sleep);
 }
 
+/* teardown can race with these guys and stop them in their read lock.. 
+ * teardown will clear sk_user_data and reset the callbacks so that these
+ * guys can know to call them and not lose the event.. */
 static void net_data_ready(struct sock *sk, int bytes)
 {
-	net_inode_private *net = sk->sk_user_data;
 	void (*ready)(struct sock *sk, int bytes);
+	net_inode_private *net;
 
 	read_lock(&sk->sk_callback_lock);
-	net_got_sock_callback(net, sk);
+	net = sk->sk_user_data;
+	if (net == NULL) {
+		ready = sk->sk_data_ready;
+		goto out;
+	}
+
+	netprintk("data_ready hit for net %p\n", net);
+
+	spin_lock(&net_active_lock);
+	net_make_active(net);
+	spin_unlock(&net_active_lock);
+
 	ready = net->orig_data_ready;
+out:
 	read_unlock(&sk->sk_callback_lock);
-
 	ready(sk, bytes);
 
 }
 static void net_error_report(struct sock *sk)
 {
-	net_inode_private *net = sk->sk_user_data;
 	void (*report)(struct sock *sk);
+	net_inode_private *net;
 
 	read_lock(&sk->sk_callback_lock);
-	net_got_sock_callback(net, sk);
+	net = sk->sk_user_data;
+	if (net == NULL) {
+		report = sk->sk_error_report;
+		goto out;
+	}
+
+	netprintk("error_report hit for net %p\n", net);
+
+	spin_lock(&net_active_lock);
+	net_make_active(net);
+	spin_unlock(&net_active_lock);
+
 	report = net->orig_error_report;
+out:
 	read_unlock(&sk->sk_callback_lock);
-
 	report(sk);
 }
 
@@ -1063,7 +1041,7 @@
 	net_inode_private *net;
 	struct socket *sock;
 	net_msg *hdr;
-	int err = 0, read_eagain;
+	int err = 0, read_eagain, read_some;
 	void *data;
 	size_t datalen;
 
@@ -1087,19 +1065,35 @@
 
 		priv = container_of(net, nm_node_inode_private, net);
 	       	inode = priv->inode;
-		sock = net->sock;
-		BUG_ON(sock == NULL); /* real refcounting, please! */
+		sock = NULL;
 
 		err = 0;
 		read_eagain = 0;
+		read_some = 0;
 
+		spin_lock_bh(&net->sock_lock);
+		if (net->sock && !net->sock_pending) {
+			sock = net->sock;
+			net->sock_refs++;
+			if (net->defer_release)
+				err = -ENOTCONN;
+		}
+		spin_unlock_bh(&net->sock_lock);
+
+		if (sock == NULL)
+			continue;
+
+		if (err)
+			goto done;
+
 		/* do we need more header? */
 		if (net->page_off < sizeof(net_msg)) {
 			data = page_address(net->page) + net->page_off;
 			datalen = sizeof(net_msg) - net->page_off;
-			err = net_recv_tcp_msg(inode, sock, data, datalen);
+			err = net_recv_tcp_msg(sock, data, datalen);
 			if (err > 0) {
 				net->page_off += err;
+				read_some = 1;
 				/* only swab incoming here.. we can
 				 * only get here once as we cross from
 				 * being under to over */
@@ -1133,9 +1127,11 @@
 			data = page_address(net->page) + net->page_off;
 			datalen = (sizeof(net_msg) + hdr->data_len) -
 				  net->page_off;
-			err = net_recv_tcp_msg(inode, sock, data, datalen);
-			if (err > 0)
+			err = net_recv_tcp_msg(sock, data, datalen);
+			if (err > 0) {
+				read_some = 1;
 				net->page_off += err;
+			}
 			if (err < 0) {
 				if (err == -EAGAIN)
 					read_eagain = 1;
@@ -1146,7 +1142,7 @@
 		if (net->page_off - sizeof(net_msg) == hdr->data_len) {
 			/* whooo peee, we have a full message */
 			/* after calling this the message is toast */
-			err = net_process_message(inode, sock, hdr);
+			err = net_process_message(sock, hdr);
 			net->page_off = 0;
 		}
 	
@@ -1156,21 +1152,20 @@
 		 * as long as there may still be remaining data.  
 		 * data_ready might have been called after we saw eagain */
 		spin_lock_bh(&net_active_lock);
-		if (!read_eagain && list_empty(&net->active_item))
-			list_add_tail(&net->active_item, &net_active_list);
+		if (read_some && !read_eagain)
+			net_make_active(net);
 		spin_unlock_bh(&net_active_lock);
 
 		netprintk("net %p finished reading with %d\n", net, err);
 		if (err < 0 && err != -EAGAIN) {
 			netprintk("socket saw err %d, closing\n", err);
-			net_dump_and_close_sock(sock, inode);
+			net_sock_decref(inode, err);
 		}
 	}
 
 	return 0;
 }
 
-
 static void net_do_status_return(net_msg *hdr)
 {
 	net_status_ctxt *nsc = NULL;
@@ -1182,7 +1177,7 @@
 		if (nsc->msg_num == hdr->msg_num) {
 			nsc->status = hdr->status;
 			atomic_set(&nsc->woken, 1);
-			list_del(&nsc->list);
+			list_del_init(&nsc->list);
 			wake_up(&nsc->wq);
 			break;
 		}
@@ -1195,8 +1190,7 @@
 
 /* this returns -errno if the header was unknown or too large, etc.
  * after this is called the buffer us reused for the next message */
-static int net_process_message(struct inode *inode, struct socket *sock,
-			       net_msg *hdr)
+static int net_process_message(struct socket *sock, net_msg *hdr)
 {
 	int ret;
 	net_msg_handler *hnd = NULL;
@@ -1252,7 +1246,7 @@
 	if (hdr->status) {
 		int tmpret;
 		/* this destroys the hdr, so don't use it after this */
-		tmpret = net_send_status_magic(inode, sock, hdr, ret);
+		tmpret = net_send_status_magic(sock, hdr, ret);
 		hdr = NULL;
 		netprintk("sending status %d returned %d\n", ret, tmpret);
 		ret = 0;
@@ -1266,359 +1260,464 @@
 	return ret;
 }
 
-static void net_record_new_sock(net_inode_private *net)
+/*
+ * The rest of the file is all about managing sockets.
+ */
+
+static int net_attach_sock(net_inode_private *net, struct socket *sock)
 {
 	struct sock *sk;
+	int ret = 0;
 
-	BUG_ON(net->sock == NULL);
-	BUG_ON(net->sock->sk == NULL);
+	netprintk("attaching sock %p to net %p\n", sock, net);
 
-	netprintk("added net %p to net_active_list\n", net);
+	/* this could be racing with an active connect, it needs to
+	 * compare the socks consistently so both sides agree to close
+	 * the same socket */
+	spin_lock_bh(&net->sock_lock); 
+	if (net->sock != NULL && net->sock != sock)
+		ret = -EEXIST;
+	else 
+		net->sock = sock;
+	spin_unlock_bh(&net->sock_lock); 
+	if (ret)
+		goto out;
 
 	sk = net->sock->sk;
+	tcp_sk(sk)->nonagle = 1;
+
 	write_lock_bh(&sk->sk_callback_lock);
-	if (sk->sk_user_data != net) {
-		net->orig_data_ready = sk->sk_data_ready;
-		net->orig_error_report = sk->sk_error_report;
+	net->orig_data_ready = sk->sk_data_ready;
+	net->orig_error_report = sk->sk_error_report;
 
-		sk->sk_user_data = net;
-		sk->sk_data_ready = net_data_ready;
-		sk->sk_error_report = net_error_report;
-	}
+	sk->sk_user_data = net;
+	sk->sk_data_ready = net_data_ready;
+	sk->sk_error_report = net_error_report;
 	write_unlock_bh(&sk->sk_callback_lock);
 
 	/* record it as active initially to make sure we didn't miss
 	 * any incoming data while we were setting it up */
 	spin_lock_bh(&net_active_lock);
-	if (list_empty(&net->active_item))
-		list_add_tail(&net->active_item, &net_active_list);
+	net_make_active(net);
 	spin_unlock_bh(&net_active_lock);
+out:
+	netprintk("attaching sock %p to net %p returned: %d\n", sock, net,
+		  ret);
+	return ret;
+}
 
-	if (recv_sock != NULL)
-		wake_up(recv_sock->sk->sk_sleep);
+struct waiting_for_sock {
+	int			rc;
+	struct list_head	waiting_item;
+	wait_queue_t 		entry;
+};
+
+/* must hold sock lock */
+static void net_wake_sock_waiters(net_inode_private *net, int rc)
+{
+	struct list_head *pos, *tmp;
+	struct waiting_for_sock *wfs;
+
+	netprintk("net %p waking waiters with rc %d\n", net, rc);
+
+	list_for_each_safe(pos, tmp, &net->pending_waiters) {
+		wfs = list_entry(pos, struct waiting_for_sock, waiting_item);
+		list_del_init(&wfs->waiting_item);
+
+		wfs->rc = rc;
+	}
+
+	wake_up(&net->waitq);
 }
 
-/*
- * net_accept_tcp_connections()
- *
- */
-static int net_accept_tcp_connections(void)
+/* we register this callback when we start a connect.  once we're done
+ * with the connect state machine we unregister ourselves.  teardown of
+ * active connected sockets only happens in the rx thread.  this socket
+ * can only make it to the rx thread through here and we reset _state_change.
+ * this can't race with teardown */
+static void net_state_change(struct sock *sk)
 {
-	int error, slen;
-	struct sockaddr_in sin;
-	struct socket *sock;
-	struct inode *inode;
+	net_inode_private *net;
+	void (*state_change)(struct sock *sk);
+	int rc = 0, should_wake = 1;
 
-	if (!recv_sock) {
-		netprintk0("no socket!\n");
-		return 0;
-	}
+	write_lock(&sk->sk_callback_lock);
 	
-	if (!tcp_sk(recv_sock->sk)->accept_queue) {
-		//netprintk0("no connections on the queue\n");
-		return 0;
-	}
-	error = 0;
-	while (error >= 0) {
-		error = sock_create_lite(recv_sock->sk->sk_family,
-					 recv_sock->sk->sk_type,
-					 recv_sock->sk->sk_protocol,
-					 &sock);
-		if (error)
-			break;
+	net = sk->sk_user_data;
+	BUG_ON(net == NULL);
+	BUG_ON(net->sock == NULL);
 
-		sock->type = recv_sock->type;
-		sock->ops = recv_sock->ops;
-		error = recv_sock->ops->accept(recv_sock, sock, O_NONBLOCK);
-		if (error < 0) {
-			sock_release(sock);
-			break;
-		}
-		if (sock->sk->sk_state == TCP_CLOSE) {
-			sock_release(sock);
-			continue;
-		}
+	state_change = net->orig_state_change;
 
-		tcp_sk(recv_sock->sk)->nonagle = 1;
-			
-		slen = sizeof(sin);
-		error = sock->ops->getname(sock, (struct sockaddr *) &sin, &slen, 1);
-		if (error < 0)
+	switch(sk->sk_state) {
+		case TCP_SYN_SENT: 
+		case TCP_SYN_RECV: 
+			should_wake = 0;
 			break;
-		
-		netprintk("attempt to connect from %u.%u.%u.%u:%04x\n", 
-			NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
+		case TCP_ESTABLISHED: 
+			rc = 0;
+			break;
+		default:
+			rc = -ENOTCONN;
+			break;
+	}
 
-		inode = nm_get_node_by_ip(sin.sin_addr.s_addr);
-		if (inode) {
-			int exists = 1;
-			nm_node_inode_private *priv = inode->u.generic_ip;
-			net_inode_private *net = NULL;
+	netprintk("net %p sock %p went to state %d; should_wake %d rc %d\n",
+		  net, net->sock, sk->sk_state, should_wake, rc);
 
-			if (priv) {
-				net = &priv->net;
-				netprintk("connect from known host: %s\n",
-				      priv->node.node_name);
-				if (ntohs(sin.sin_port) >= 1024)
-					netprintk("warning: connect from unprivileged port: %u.%u.%u.%u:%d\n",
-						NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
-	 			spin_lock(&priv->net.sock_lock); 
-				if (!priv->net.sock) {
-					netprintk("new sock, giving net %p sock %p\n", net, sock); 
-					exists = 0;
-					priv->net.sock = sock;
+	if (should_wake) {
+		spin_lock(&net->sock_lock);
+		if (net->sock_pending) {
+			net->sock_pending = 0;
+			/* let the rx thread do the final _release when
+			 * they go to grab this guy */ 
+			if (rc)
+				net->defer_release = 1;
 
-				}
-	 			spin_unlock(&priv->net.sock_lock); 
+			sk->sk_state_change = net->orig_state_change;
+			net_wake_sock_waiters(net, rc);
+		} else
+			should_wake = 0;
+		spin_unlock(&net->sock_lock);
+	}
 
-				if (exists) {
-					netprintk0("already a socket for this connection!\n");
-					net_send_error(inode, sock, NET_ALREADY_CONNECTED);
-					net_dump_and_close_sock(sock, inode);
-				} else {
-					net_record_new_sock(net);
-				}
-			}
+	write_unlock(&sk->sk_callback_lock);
 
-			iput(inode);
-		} else {
-			netprintk0("connect from unknown host...\n");
-			net_send_error(inode, sock, NET_UNKNOWN_HOST);
-			net_dump_and_close_sock(sock, inode);
-		}
+	/* net_attach grabs every lock in the known universe so we do it
+	 * out here */
+	if (should_wake)
+		net_attach_sock(net, net->sock);
+
+	state_change(sk);
+}	
+
+static int net_start_connect(net_inode_private *net, nm_node_info *node)
+{
+	struct socket *sock = NULL;
+	struct sock *sk;
+	struct sockaddr_in myaddr, remoteaddr;
+	int ret;
+
+	ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
+	if (ret < 0) {
+		netprintk("can't create socket: %d\n", ret);
+		goto out;
 	}
-	return error;
+
+	memset(&myaddr, 0, sizeof(myaddr));
+	myaddr.sin_family = AF_INET;
+	myaddr.sin_port = htons(0);  // any port
+	ret = sock->ops->bind(sock, (struct sockaddr *)&myaddr,
+			      sizeof(myaddr));
+	if (ret) {
+		netprintk("bind failed: %d\n", ret);
+		goto out;
+	}
+	
+	memset (&remoteaddr, 0, sizeof (remoteaddr));
+	remoteaddr.sin_family = net_ip_version_to_family(node->ifaces[0].ip_version);
+	remoteaddr.sin_addr.s_addr = node->ifaces[0].addr_u.ip_addr4;
+	remoteaddr.sin_port = node->ifaces[0].ip_port;
+
+	net->sock = sock;
+
+	sk = sock->sk;
+	write_lock_bh(&sk->sk_callback_lock);
+	sk->sk_user_data = net;
+	net->orig_state_change = sk->sk_state_change;
+	sk->sk_state_change = net_state_change;
+	write_unlock_bh(&sk->sk_callback_lock);
+
+	ret = sock->ops->connect(sock, (struct sockaddr *)&remoteaddr, 
+				 sizeof(remoteaddr), O_NONBLOCK);
+	if (ret == -EINPROGRESS)
+		ret = 0;
+	netprintk("starting connect for net %p sock %p gave %d\n", net, sock,
+		  ret);
+out:
+	if (ret) {
+		spin_lock_bh(&net->sock_lock);
+		net->sock_pending = 0;
+		net->sock = NULL;
+		net_wake_sock_waiters(net, ret);
+		spin_unlock_bh(&net->sock_lock);
+		if (sock)
+			sock_release(sock);
+	}
+	return ret;
 }
 
-static void net_dump_and_close_sock(struct socket *sock, struct inode *inode)
+static void net_sock_drain(struct socket *sock)
 {
-	nm_node_inode_private *priv = NULL;
-	struct msghdr           msg;
-	struct iovec            iov;
-	int                     len;
-	mm_segment_t            oldfs;
+	int             len;
+	mm_segment_t    oldfs;
+	static char	junk[PAGE_SIZE];
+	struct iovec iov = {
+		.iov_base = junk,
+		.iov_len = sizeof(junk),
+	};
+	struct msghdr msg = {
+		.msg_iov      = &iov,
+		.msg_iovlen   = 1,
+		.msg_flags    = MSG_DONTWAIT,
+	};
 
-	if (sock->sk) {
-		if (inode)
-	       		priv = inode->u.generic_ip;
-		if (!priv)
-			goto release;
+	oldfs = get_fs();
+	set_fs(KERNEL_DS);
+	for(len = 1; sock->sk && len > 0; )
+		len = sock_recvmsg(sock, &msg, PAGE_SIZE, MSG_DONTWAIT);
 
-		len = 1;
-		while (len>0)
-		{
-			msg.msg_name     = 0;
-			msg.msg_namelen  = 0;
-			msg.msg_iov      = &iov;
-			msg.msg_iovlen   = 1;
-			msg.msg_control  = NULL;
-			msg.msg_controllen = 0;
-			msg.msg_flags    = MSG_DONTWAIT;
-			msg.msg_iov->iov_base = page_address(priv->net.page);
-			msg.msg_iov->iov_len  = (__kernel_size_t)PAGE_SIZE;
+	set_fs(oldfs);
+}
 
-			oldfs = get_fs();
-			set_fs(KERNEL_DS);
-			len = sock_recvmsg(sock, &msg, PAGE_SIZE, MSG_DONTWAIT);
-			set_fs(oldfs);
+static void net_sock_decref(struct inode *inode, int error)
+{
+	net_inode_private *net = NULL;
+	nm_node_inode_private *priv;
+	struct socket *sock = NULL;
+	int release = 0;
 
-			if (!len)
-				break;
-		}
+	priv = inode->u.generic_ip;
+	if (!priv) {
+		netprintk("bad inode %p\n", inode);
+		return;
 	}
+	net = &priv->net;
 
-	if (sock->sk) {
-		if (inode) {
-	       		priv = inode->u.generic_ip;
-			if (priv) {
-	 			spin_lock(&priv->net.sock_lock); 
-				priv->net.sock = NULL;
-	 			spin_unlock(&priv->net.sock_lock); 
-			}
+	spin_lock_bh(&net->sock_lock); 
+
+	BUG_ON(net->sock_pending);
+	BUG_ON(net->sock_refs == 0);
+	BUG_ON(net->sock == NULL);
+
+	netprintk("decref for net %p ->sock %p err %d: refs %lu defer %u\n",
+		  net, net->sock, error, net->sock_refs, net->defer_release);
+
+	if (error)
+		net->defer_release = 1;
+	if (--net->sock_refs == 0 && net->defer_release) {
+		sock = net->sock;
+		net->sock = NULL;
+		net->defer_release = 0;
+		release = 1;
+	}
+	spin_unlock_bh(&net->sock_lock); 
+
+	if (release) {
+		/* stop any callbacks from hitting before we tear down
+		 * this sock */
+		if (sock->sk && sock->sk->sk_user_data) {
+			struct sock *sk = sock->sk;
+
+			write_lock_bh(&sk->sk_callback_lock);
+			sk->sk_data_ready = net->orig_data_ready;
+			sk->sk_error_report = net->orig_error_report;
+			sk->sk_user_data = NULL;
+			write_unlock_bh(&sk->sk_callback_lock);
+			/* XXX can we sync with bottom halves here? */
 		}
+		sock_release(sock);
 	}
-release:
-	sock_release(sock);
 }
 
-/* this is racey beyond reason, the userspace work will involve some tracking
- * structures that senders can wait on or time out on for connections
- * to happen */
-int net_init_tcp_sock(struct inode *inode)
+static int wfs_complete(net_inode_private *net, struct waiting_for_sock *wfs)
 {
+	int empty;
+
+	spin_lock_bh(&net->sock_lock); 
+	empty = list_empty(&wfs->waiting_item);
+	spin_unlock_bh(&net->sock_lock); 
+
+	return empty;
+}
+
+static int net_sock_addref_or_connect(struct inode *inode,
+				      struct socket **sock_ret)
+{
 	nm_node_inode_private *priv;
 	nm_node_info *node;
 	net_inode_private *net = NULL;
-	struct sockaddr_in myaddr, remoteaddr;
-	int err = -EINVAL;
-	int i;
-	struct sock *sk;
 	struct socket *sock = NULL;
+	int ret = 0, wait = 0, set_pending = 0;
+	struct waiting_for_sock wfs;
 
 	priv = inode->u.generic_ip;
 	if (!priv) {
-		netprintk0("bad inode\n");
-		return -EINVAL;
+		netprintk("bad inode %p\n", inode);
+		ret = -EINVAL;
+		goto out;
 	}
 	net = &priv->net;
 	node = &priv->node;
-	
-	if ((err = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) {
-		netprintk("can't create socket: err=%d\n", err);
-		return err;
+
+	spin_lock_bh(&net->sock_lock); 
+	if (net->sock && !net->sock_pending) {
+		/* just get a ref.  this could be a defer_release socket */
+		sock = net->sock;
+		net->sock_refs++;
+	} else {
+		if (!net->sock_pending) {
+			/* ok, we'll be initiating the connect */
+			net->sock_pending = 1;
+			set_pending = 1;
+		}
+		list_add_tail(&wfs.waiting_item, &net->pending_waiters);
+		init_waitqueue_entry(&wfs.entry, current);
+		add_wait_queue(&net->waitq, &wfs.entry);
+		wait = 1;
 	}
+	spin_unlock_bh(&net->sock_lock); 
 
-	spin_lock(&net->sock_lock); 
-	if (net->sock || net->flags & NET_FLAG_CREATING_SOCKET) {
-		netprintk("socket already created or creating for inode %lu\n", inode->i_ino);
-		spin_unlock(&net->sock_lock);
-		sock_release(sock);
-		return -EEXIST;
+	if (set_pending) {
+		ret = net_start_connect(net, node);
+		if (ret)
+			goto out;
 	}
-	net->flags |= NET_FLAG_CREATING_SOCKET;
-	spin_unlock(&net->sock_lock);
 
-	memset(&myaddr, 0, sizeof(myaddr));
-	myaddr.sin_family = AF_INET;
-	myaddr.sin_port = htons(0);  // any port
-	err = sock->ops->bind(sock, (struct sockaddr *) &myaddr, sizeof(myaddr));
+	if (wait) {
+		ret = wait_event_interruptible(net->waitq,
+					       wfs_complete(net, &wfs));
+		if (ret == 0)
+			ret = wfs.rc;
+		netprintk("sleeping for net %p gave %d\n", net, ret);
+		if (ret)
+			goto out;
+
+		/* try again to get a good socket.   if we can't, just
+		 * forget about it. */
+		spin_lock_bh(&net->sock_lock); 
+		if (net->sock && !net->sock_pending) {
+			sock = net->sock;
+			net->sock_refs++;
+		} else
+			ret = -ENOTCONN;
+		spin_unlock_bh(&net->sock_lock); 
+		if (ret)
+			goto out;
+	}
 	
-	memset (&remoteaddr, 0, sizeof (remoteaddr));
-	remoteaddr.sin_family = net_ip_version_to_family(node->ifaces[0].ip_version);
-	remoteaddr.sin_addr.s_addr = node->ifaces[0].addr_u.ip_addr4;
-	remoteaddr.sin_port = node->ifaces[0].ip_port;
+out:
+	if (wait) {
+		spin_lock_bh(&net->sock_lock); 
+		if (!list_empty(&wfs.waiting_item))
+			list_del_init(&wfs.waiting_item);
+		remove_wait_queue(&net->waitq, &wfs.entry);
+		spin_unlock_bh(&net->sock_lock); 
+	}
+	if (sock)
+		*sock_ret = sock;
 
-	//netprintk("connecting new socket: ip %d.%d.%d.%d, port %d\n", NIPQUAD(remoteaddr.sin_addr.s_addr), remoteaddr.sin_port);
-	err = sock->ops->connect(sock, (struct sockaddr *) &remoteaddr, 
-					sizeof(remoteaddr), 0); /* TODO put this back!  O_NONBLOCK); */
-	//netprintk("connect status %d\n", err);
+	BUG_ON(ret == 0 && sock == NULL);
+	netprintk("addref for net %p gave %d\n", net, ret);
+	return ret;
+}
+
+static void net_try_accept(void)
+{
+	int error, slen;
+	struct sockaddr_in sin;
+	struct socket *sock = NULL;
+	struct inode *inode = NULL;
+	nm_node_inode_private *priv;
+
+	BUG_ON(recv_sock == NULL);
+	error = sock_create_lite(recv_sock->sk->sk_family,
+				 recv_sock->sk->sk_type,
+				 recv_sock->sk->sk_protocol,
+				 &sock);
+	if (error)
+		goto out;
+
+	sock->type = recv_sock->type;
+	sock->ops = recv_sock->ops;
+	error = recv_sock->ops->accept(recv_sock, sock, O_NONBLOCK);
+	if (error < 0)
+		goto out;
+
+	slen = sizeof(sin);
+	error = sock->ops->getname(sock, (struct sockaddr *) &sin, &slen, 1);
+	if (error < 0)
+		goto out;
 	
-	if (err >= 0) {
-		spin_lock(&net->sock_lock);
-		net->sock = sock;
-		net->flags &= ~NET_FLAG_CREATING_SOCKET;
-		spin_unlock(&net->sock_lock);
+	netprintk("attempt to connect from %u.%u.%u.%u:%04x\n", 
+		NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
+
+	inode = nm_get_node_by_ip(sin.sin_addr.s_addr);
+	if (inode == NULL) {
+		netprintk0("connect from unknown host...\n");
+		net_send_error(sock, NET_UNKNOWN_HOST);
 		goto out;
 	}
 
-	sk = sock->sk;
-	switch (err) {
-		case -EALREADY:
-		case -EINPROGRESS:
-					
-			/* TODO: awful awful awful */
-			for (i=0; i<100; i++) {
-				/* Protect against TCP socket state changes */
-				lock_sock(sk);
-				if (sk->sk_state == TCP_ESTABLISHED) {
-					release_sock(sk);
-					netprintk0("woo!  connected...\n");
-					err = 0;
-					spin_lock(&net->sock_lock);
-					net->flags &= ~NET_FLAG_CREATING_SOCKET;
-					net->sock = sock;
-					spin_unlock(&net->sock_lock);
-					break;
-				} else {
-					netprintk("waiting for connection: pass %d, state %d\n", i, sk->sk_state);
-					/* TODO */
-#if 0
-					task->tk_timeout = RPC_CONNECT_TIMEOUT;
-					/* if the socket is already closing, delay briefly */
-					if ((1<<sk->state) & ~(TCPF_SYN_SENT|TCPF_SYN_RECV))
-						task->tk_timeout = RPC_REESTABLISH_TIMEOUT;
-					rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL);
-#endif
-					/* TODO: this is awful... change it later */
-				}
-				release_sock(sk);
-				util_sleep(100);
-			}
-			break;
-		case -ECONNREFUSED:
-		case -ECONNRESET:
-		case -ENOTCONN:
-			netprintk("conn refused, reset or not connected\n");
-			break;
-		default:
-			/* Report myriad other possible returns.  If this file
-			* system is soft mounted, just error out, like Solaris.  */
-			netprintk("error %d connecting to server\n", err);
-			/* TODO */
-#if 0
-			/* This will prevent anybody else from connecting */
-			rpc_delay(task, RPC_REESTABLISH_TIMEOUT);
-			task->tk_status = status;
-#endif
-			break;
-	}
+	priv = inode->u.generic_ip;
+	BUG_ON(priv == NULL);
 
+	netprintk("connect from known host: %s\n", priv->node.node_name);
+
+	if (ntohs(sin.sin_port) >= 1024)
+		netprintk("warning: connect from unprivileged port: "
+			  "%u.%u.%u.%u:%d\n", NIPQUAD(sin.sin_addr.s_addr),
+			  ntohs(sin.sin_port));
+
+	error = net_attach_sock(&priv->net, sock);
+	if (error == -EEXIST)
+		net_send_error(sock, NET_ALREADY_CONNECTED);
+
 out:
-	if (err < 0) {
-		if (net) {
-			spin_lock(&net->sock_lock);
-			if (net->sock)
-				netprintk0("wha?! there's a socket there already!!!!\n");
-			net->flags &= ~NET_FLAG_CREATING_SOCKET;
-			spin_unlock(&net->sock_lock);
+	if (error) {
+		if (sock) {
+			net_sock_drain(sock);
+			sock_release(sock);
 		}
-	       	if (sock) 
-			sock_release(sock);
-	} else {
-		net_record_new_sock(net);
+		if (inode)
+			iput(inode);
 	}
-
-	return err;
+	return;
 }
 
-
-
-/*
- * net_init_tcp_recv_sock()
- *
- */
 static int net_init_tcp_recv_sock(void)
 {
 	struct sockaddr_in sin;
-	int status = -EINVAL;
+	int error;
 
-	/* Create Receive Socket */
-	status = sock_create(net_ip_version_to_family(ip_version),
+	error = sock_create(net_ip_version_to_family(ip_version),
 			     SOCK_STREAM, IPPROTO_TCP,
 			     &recv_sock);
-	if (status < 0) {
-		netprintk ("unable to create socket, error=%d\n", status);
+	if (error < 0) {
+		netprintk("unable to create socket, error=%d\n", error);
 		goto bail;
 	}
 
-
-	/* Bind Receive Socket */
 	memset(&sin, 0, sizeof(sin));
 	sin.sin_family = net_ip_version_to_family(ip_version);
 	sin.sin_addr.s_addr = htonl(INADDR_ANY);
 	sin.sin_port = ip_port;
 
-	status = recv_sock->ops->bind(recv_sock,
-					 (struct sockaddr *)&sin,
-					 sizeof(sin));
-	if (status < 0) {
+	error = recv_sock->ops->bind(recv_sock, (struct sockaddr *)&sin,
+				      sizeof(sin));
+	if (error < 0) {
 		netprintk ("unable to bind socket to port %d, error=%d\n", 
-			ntohs(ip_port), status);
+			ntohs(ip_port), error);
+		goto bail;
 	}
 
 	/* !!! dunno about these... */
 	recv_sock->sk->sk_reuse = 1;
-	status = recv_sock->ops->listen(recv_sock, 64);
+	error = recv_sock->ops->listen(recv_sock, 64);
 
 bail:
-	return status;
+	if (error && recv_sock) {
+		sock_release(recv_sock);
+		recv_sock = NULL;
+	}
+	return error;
 }				/* net_init_tcp_recv_sock */
 
 
 static void net_release_tcp_sock(void)
 {
 	if (recv_sock) {
-		sock_release (recv_sock);
+		sock_release(recv_sock);
 		recv_sock = NULL;
 	}
 }



More information about the Ocfs2-commits mailing list