[rds-commits] zab commits r117 - trunk/linux/net/rds

svn-commits@oss.oracle.com svn-commits at oss.oracle.com
Wed Jun 28 13:44:54 CDT 2006


Author: zab
Date: 2006-06-28 13:44:52 -0500 (Wed, 28 Jun 2006)
New Revision: 117

Modified:
   trunk/linux/net/rds/connection.c
   trunk/linux/net/rds/flow.c
   trunk/linux/net/rds/ib_cm.c
   trunk/linux/net/rds/tcp_listen.c
Log:
Make connection initiation explicit.

Move the queueing of connect work to the caller of conn allocation. This lets
listen avoid having to queue and race with connection initiation.


Modified: trunk/linux/net/rds/connection.c
===================================================================
--- trunk/linux/net/rds/connection.c	2006-06-27 23:53:01 UTC (rev 116)
+++ trunk/linux/net/rds/connection.c	2006-06-28 18:44:52 UTC (rev 117)
@@ -179,10 +179,8 @@
 		trans->conn_free(conn->c_transport_data);
 		kmem_cache_free(rds_conn_slab, conn);
 		conn = tmp;
-	} else {
+	} else
 		hlist_add_head(&conn->c_hash_node, head);
-		queue_work(rds_wq, &conn->c_connect_work);
-	}
 	spin_unlock_irqrestore(&rds_conn_lock, flags);
 
 out:

Modified: trunk/linux/net/rds/flow.c
===================================================================
--- trunk/linux/net/rds/flow.c	2006-06-27 23:53:01 UTC (rev 116)
+++ trunk/linux/net/rds/flow.c	2006-06-28 18:44:52 UTC (rev 117)
@@ -113,6 +113,10 @@
 		goto out;
 	}
 
+	if (!test_bit(RDS_CONN_CONNECTING, &conn->c_status) &&
+	    !test_bit(RDS_CONN_CONNECTED, &conn->c_status))
+		queue_work(rds_wq, &conn->c_connect_work);
+
 	flow = kmem_cache_alloc(rds_flow_slab, gfp);
 	if (flow == NULL) {
 		ret = -ENOMEM;

Modified: trunk/linux/net/rds/ib_cm.c
===================================================================
--- trunk/linux/net/rds/ib_cm.c	2006-06-27 23:53:01 UTC (rev 116)
+++ trunk/linux/net/rds/ib_cm.c	2006-06-28 18:44:52 UTC (rev 117)
@@ -193,6 +193,10 @@
 		goto out;
 	}
 
+	/* XXX this notices the race */
+	if (test_and_set_bit(RDS_CONN_CONNECTING, &conn->c_status))
+		BUG();
+
 	/* XXX this seems totally crazy. */
 	ic = conn->c_transport_data;
 	ic->i_cm_id = cm_id;

Modified: trunk/linux/net/rds/tcp_listen.c
===================================================================
--- trunk/linux/net/rds/tcp_listen.c	2006-06-27 23:53:01 UTC (rev 116)
+++ trunk/linux/net/rds/tcp_listen.c	2006-06-28 18:44:52 UTC (rev 117)
@@ -35,7 +35,6 @@
 {
 	struct socket *new_sock = NULL;
 	struct rds_connection *conn;
-	struct rds_tcp_connection *tc;
 	int ret;
 #ifdef KERNEL_HAS_INET_SK_RETURNING_INET_SOCK
 	struct inet_sock *inet;
@@ -71,9 +70,11 @@
 	}
 
 	/* 
-	 * If rds_conn_create() gave us a conn then it will have already called
-	 * tcp's conn_alloc.  We only install this new socket if we don't
-	 * already have a socket.
+	 * Only associate this newly accepted socket with the connection if
+	 * the connection wasn't already in the process of trying to connect
+	 * to the remote peer.  If the connection was already connecting
+	 * then we drop this new socket and let the initiated connection
+	 * make forward progress.
 	 *
 	 * XXX This doesn't deal particularly gracefully with racing connects.
 	 * Both ends will refuse the newly accepted sockets in preference to the
@@ -83,14 +84,12 @@
 	 * could make it random within a delay that increases exponentially.
 	 * Particularly it'd have to be greater than a few round trips.
 	 */
-	tc = conn->c_transport_data;
-	if (tc->t_sock == NULL) {
+	if (!test_and_set_bit(RDS_CONN_CONNECTING, &conn->c_status)) {
 		rds_tcp_set_callbacks(new_sock, conn);
 		rds_connect_complete(conn);
 		new_sock = NULL;
 	} else
-		rdsdebug("tc %p already has sock %p, closing new sock %p\n",
-			 tc, tc->t_sock, new_sock);
+		rdsdebug("already connected, closing new sock %p\n", new_sock);
 
 	ret = 0;
 




More information about the rds-commits mailing list