[rds-commits] zab commits r121 - trunk/linux/net/rds

svn-commits at oss.oracle.com
Wed Jun 28 19:27:55 CDT 2006


Author: zab
Date: 2006-06-28 19:27:54 -0500 (Wed, 28 Jun 2006)
New Revision: 121

Modified:
   trunk/linux/net/rds/ib.h
   trunk/linux/net/rds/ib_cm.c
   trunk/linux/net/rds/ib_recv.c
Log:
More progress towards working IB send and receive.

Ha ha, 11b is 0x3, not 0x11.

Be sure to init the list_head that holds the recvs queued in an IB incoming.

Since we're filling the receive queue by allocating and mapping pages, we'll
only do so from the thread.  Trigger refilling when the recv queue drops
below half full.
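
For context on the mask fix: the wr_id mask covers a two-bit type field, so
it has to be 0x3 (binary 11); 0x11 is binary 10001, which drops bit 1
entirely.  A minimal sketch of the decode, assuming the type sits in the low
two bits of the wr_id (the helper is illustrative, not from this tree):

#include <stdint.h>

#define RDS_IB_WR_ID_MASK	0x3	/* low two bits hold the type */
#define RDS_IB_WR_ID_HEADER	0
#define RDS_IB_WR_ID_DATA	1
#define RDS_IB_WR_ID_RECV	2

static inline uint64_t rds_ib_wr_id_type(uint64_t wr_id)
{
	/* with the old 0x11 mask, bit 1 was masked off, so
	 * RDS_IB_WR_ID_RECV (0x2) could never be decoded; a recv
	 * completion would have looked like a header send */
	return wr_id & RDS_IB_WR_ID_MASK;
}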


Modified: trunk/linux/net/rds/ib.h
===================================================================
--- trunk/linux/net/rds/ib.h	2006-06-28 21:54:13 UTC (rev 120)
+++ trunk/linux/net/rds/ib.h	2006-06-29 00:27:54 UTC (rev 121)
@@ -12,10 +12,10 @@
  */
 #define RDS_IB_KNOWN_PORT		18633
 #define RDS_IB_RESOLVE_TIMEOUT_MS	5000
-#define RDS_IB_MAX_RECV_BUFS		500
-#define RDS_IB_MAX_SEND_BUFS		100
+#define RDS_IB_MAX_RECV_BUFS		32 /* XXX small for debugging */
+#define RDS_IB_MAX_SEND_BUFS		32
 
-#define RDS_IB_WR_ID_MASK		0x11
+#define RDS_IB_WR_ID_MASK		0x3
 #define RDS_IB_WR_ID_HEADER		0
 #define RDS_IB_WR_ID_DATA		1
 #define RDS_IB_WR_ID_RECV		2
@@ -103,7 +103,6 @@
 void rds_ib_inc_process_acks(struct rds_connection *conn,
 			      struct rds_incoming *inc, u16 nr);
 void rds_ib_recv_complete(struct rds_connection *conn, u64 wr_id);
-int rds_ib_recv_refill(struct rds_connection *conn);
 
 /* ib_send.c */
 int __init rds_ib_send_init(void);

Modified: trunk/linux/net/rds/ib_cm.c
===================================================================
--- trunk/linux/net/rds/ib_cm.c	2006-06-28 21:54:13 UTC (rev 120)
+++ trunk/linux/net/rds/ib_cm.c	2006-06-29 00:27:54 UTC (rev 121)
@@ -177,8 +177,6 @@
 	rdsdebug("conn %p pd %p mr %p cq %p\n", conn, ic->i_pd, ic->i_mr,
 		 ic->i_cq);
 
-	rds_ib_recv_refill(conn);
-
 out:
 	return ret;
 }
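
The first ib_recv.c hunk below adds the missing INIT_LIST_HEAD().  This
matters because list_add_tail() reads and writes through head->prev, and a
list_head embedded in a freshly slab-allocated object is garbage until it is
initialized.  A minimal sketch of the required ordering, abbreviated to
mirror the hunk:

	/* the embedded head must be set up before any recv is chained on */
	INIT_LIST_HEAD(&ibinc->ii_recvs);

	/* ... only then, from the completion path ... */
	list_add_tail(&recv->ir_item, &ibinc->ii_recvs);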

Modified: trunk/linux/net/rds/ib_recv.c
===================================================================
--- trunk/linux/net/rds/ib_recv.c	2006-06-28 21:54:13 UTC (rev 120)
+++ trunk/linux/net/rds/ib_recv.c	2006-06-29 00:27:54 UTC (rev 121)
@@ -83,7 +83,9 @@
 	 */
 	BUG_ON(ibinc == NULL);
 
+	INIT_LIST_HEAD(&ibinc->ii_recvs);
 	rds_inc_init(&ibinc->ii_inc, conn, conn->c_faddr);
+
 	addr = kmap_atomic(page, KM_SOFTIRQ0);
 	memcpy(&ibinc->ii_inc.i_hdr, addr, sizeof(struct rds_header));
 	kunmap_atomic(addr, KM_SOFTIRQ0);
@@ -186,7 +188,8 @@
 	/* XXX we could be posting lots of these at once */
 	atomic_inc(&ic->i_recv_posted);
 	ret = ib_post_recv(ic->i_cm_id->qp, &recv->ir_wr, &failed);
-	rdsdebug("recv %p post ret %d\n", recv, ret);
+	rdsdebug("recv %p post ret %d posted %d\n", recv, ret,
+		 atomic_read(&ic->i_recv_posted));
 	if (ret) {
 		/* XXX if posting fails kick the thread to refill? */
 		rds_ib_recv_unmap(ic, recv);
@@ -207,6 +210,7 @@
 	struct rds_ib_incoming *ibinc = ic->i_ibinc;
 
 	rdsdebug("ic %p recv %p sge len %u\n", ic, recv, recv->ir_sge.length);
+
 	atomic_dec(&ic->i_recv_posted);
 
 	/* 
@@ -224,6 +228,13 @@
 		return;
 	}
 
+	/* 
+	 * pretty sure we shouldn't call dma_map_page from int, so we 
+	 * fall back to having the thread refill
+	 */
+	if (atomic_read(&ic->i_recv_posted) < RDS_IB_MAX_RECV_BUFS / 2)
+		queue_work(rds_wq, &conn->c_recv_work);
+
 	rds_ib_recv_unmap(ic, recv);
 	list_add_tail(&recv->ir_item, &ibinc->ii_recvs);
 
@@ -236,11 +247,9 @@
 		rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
 				  &ibinc->ii_inc, GFP_ATOMIC, KM_SOFTIRQ0);
 	}
-
-	rds_ib_recv_refill(conn);
 }
 
-int rds_ib_recv_refill(struct rds_connection *conn)
+int rds_ib_recv_refill(struct rds_connection *conn, gfp_t gfp)
 {
 	struct rds_ib_connection *ic = conn->c_transport_data;
 	struct rds_ib_recv *recv;
@@ -248,7 +257,7 @@
 
 	/* eh, we don't really care about this race */
 	while (atomic_read(&ic->i_recv_posted) < RDS_IB_MAX_RECV_BUFS) {
-		recv = kmem_cache_alloc(rds_ib_recv_slab, GFP_KERNEL);
+		recv = kmem_cache_alloc(rds_ib_recv_slab, gfp);
 		if (recv == NULL) {
 			ret = -ENOMEM;
 			break;
@@ -260,7 +269,7 @@
 		 * order so that we know to unmap only as the final offset
 		 * is completed.
 		 */
-		recv->ir_page = alloc_page(GFP_KERNEL);
+		recv->ir_page = alloc_page(gfp);
 		if (recv->ir_page == NULL) {
 			kmem_cache_free(rds_ib_recv_slab, recv);
 			ret = -ENOMEM;
@@ -293,7 +302,7 @@
 /* XXX have this also poll the conn's completion queue */
 int rds_ib_recv(struct rds_connection *conn)
 {
-	return rds_ib_recv_refill(conn);
+	return rds_ib_recv_refill(conn, GFP_KERNEL);
 }
 
 int __init rds_ib_recv_init(void)
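
Taken together, the ib_recv.c changes move all refilling into process
context: the completion handler just accounts for the finished recv and
kicks the workqueue once the ring drops below half full, and the worker
refills with GFP_KERNEL.  A condensed sketch of the resulting flow
(rds_ib_recv_worker is a hypothetical stand-in for whatever
conn->c_recv_work points at):

/* completion (softirq) context: no allocating or mapping here */
static void rds_ib_recv_complete_sketch(struct rds_connection *conn,
					struct rds_ib_connection *ic)
{
	atomic_dec(&ic->i_recv_posted);

	/* dma_map_page() and sleeping allocations aren't safe from
	 * interrupt context, so defer refilling to the thread */
	if (atomic_read(&ic->i_recv_posted) < RDS_IB_MAX_RECV_BUFS / 2)
		queue_work(rds_wq, &conn->c_recv_work);
}

/* worker thread: safe to sleep, so GFP_KERNEL is fine */
static void rds_ib_recv_worker(void *data)
{
	struct rds_connection *conn = data;

	rds_ib_recv_refill(conn, GFP_KERNEL);
}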



