[rds-commits] zab commits r137 - trunk/linux/net/rds
svn-commits@oss.oracle.com
svn-commits at oss.oracle.com
Wed Jul 26 19:09:13 CDT 2006
Author: zab
Date: 2006-07-26 19:09:12 -0500 (Wed, 26 Jul 2006)
New Revision: 137
Modified:
trunk/linux/net/rds/ib_recv.c
Log:
Correctly copy IB headers that straddle the posted fragment and header regions.
Modified: trunk/linux/net/rds/ib_recv.c
===================================================================
--- trunk/linux/net/rds/ib_recv.c 2006-07-26 23:01:15 UTC (rev 136)
+++ trunk/linux/net/rds/ib_recv.c 2006-07-27 00:09:12 UTC (rev 137)
@@ -264,6 +264,41 @@
}
/*
+ * Work is posted as a RDS_FRAG_SIZE payload and then a header. This is
+ * done so that we can send fragments without headers and keep the fragments
+ * large and aligned. The sender doesn't pad their fragments so the header
+ * will spill into the posted regions just after the fragment.
+ *
+ * XXX If we were to flip r_page into userspace or the page cache then we'd
+ * have to zero the header and possibly the rest of the page.
+ */
+static void rds_ib_copy_header(struct rds_ib_connection *ic,
+ struct rds_ib_incoming *ibinc,
+ struct rds_ib_recv_work *recv, u32 byte_len)
+{
+ void *addr;
+ u32 start;
+
+ BUG_ON(byte_len < sizeof(struct rds_header));
+
+ /* get the start of the header from the tail of the fragment */
+ start = byte_len - sizeof(struct rds_header);
+ if (start < RDS_FRAG_SIZE) {
+ addr = kmap_atomic(recv->r_page, KM_SOFTIRQ0);
+ memcpy(&ibinc->ii_inc.i_hdr, addr + start,
+ min_t(size_t, RDS_FRAG_SIZE - start, sizeof(struct rds_header)));
+ kunmap_atomic(addr, KM_SOFTIRQ0);
+ }
+
+ /* and the rest that spilled into the posted header space; it belongs
+ * at the tail of the header, just after the bytes copied above */
+ if (byte_len > RDS_FRAG_SIZE) {
+ memcpy((char *)&ibinc->ii_inc.i_hdr + (RDS_FRAG_SIZE - start),
+ &ic->i_recv_hdrs[recv - ic->i_recvs],
+ byte_len - RDS_FRAG_SIZE);
+ }
+}
+
+/*
* Rings are posted with all the allocations they'll need to queue the
* incoming message to the receiving socket so this can't fail. It relies
* on being called in the order that the sender sent in to infer which
@@ -287,25 +322,24 @@
* off its list.
*/
if (ibinc == NULL) {
+ if (byte_len < sizeof(struct rds_header)) {
+ rds_ib_recv_unmap_page(ic, recv);
+ if (!ic->i_wc_err) {
+ ic->i_wc_err = 1;
+ printk(KERN_WARNING "RDS/IB: incoming message "
+ "from %u.%u.%u.%u didn't include a "
+ "header, disconnecting and "
+ "reconnecting\n",
+ NIPQUAD(conn->c_faddr));
+ queue_work(rds_wq, &conn->c_shutdown_work);
+ }
+ }
+
ibinc = recv->r_ibinc;
recv->r_ibinc = NULL;
- if (byte_len <= RDS_FRAG_SIZE) {
- /*
- * XXX The remainter of the page will need to be zeroed
- * if we map it to userspace or flip it into the page
- * cache.
- */
- void *addr = kmap_atomic(recv->r_page, KM_SOFTIRQ0);
- memcpy(&ibinc->ii_inc.i_hdr,
- addr + byte_len - sizeof(struct rds_header),
- sizeof(struct rds_header));
- kunmap_atomic(addr, KM_SOFTIRQ0);
- } else {
- int i = recv - ic->i_recvs;
- memcpy(&ibinc->ii_inc.i_hdr, &ic->i_recv_hdrs[i],
- sizeof(struct rds_header));
- }
+ rds_ib_copy_header(ic, ibinc, recv, byte_len);
+
ic->i_ibinc = ibinc;
ic->i_recv_data_rem = be32_to_cpu(ibinc->ii_inc.i_hdr.h_len);
rdsdebug("ic %p ibinc %p rem %u\n", ic, ibinc,
More information about the rds-commits
mailing list