2010-02-02 16:08:15

by William Allen Simpson

[permalink] [raw]
Subject: [PATCH 0/7] tcp: bugs and cleanup updated to 2.6.33-rc6

Combination of patches reported in October, November, December, and
January. These patches fix perceived bugs and perform other cleanup.

This code has had previous review and several months of limited testing.

Some portions were removed during the various TCPCT part 1 patch splits,
then were cut off by the sudden unexpected end of that merge window.
[03 Dec 2009] I've restarted the sub-numbering (again).

Of particular interest are the TCPCT header extensions that already
appear in the next phase of testing with other platforms. These patches
allow correct reception without data corruption.

The remainder of the original TCPCT part 2 will be merged with part 3.

These patches are against the current linux-2.6 tree.

[No substantive changes from previous submission of January 23rd.]


2010-02-02 16:12:33

by William Allen Simpson

[permalink] [raw]
Subject: [PATCH 1/7] net: tcp_header_len_th and tcp_option_len_th

diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 7fee8a4..d0133cf 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -223,6 +223,18 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb)
return (tcp_hdr(skb)->doff - 5) * 4;
}

+/* Length of fixed header plus standard options. */
+static inline unsigned int tcp_header_len_th(const struct tcphdr *th)
+{
+ return th->doff * 4;
+}
+
+/* Length of standard options only. This could be negative. */
+static inline int tcp_option_len_th(const struct tcphdr *th)
+{
+ return (int)(th->doff * 4) - sizeof(*th);
+}
+
/* This defines a selective acknowledgement block. */
struct tcp_sack_block_wire {
__be32 start_seq;
--
1.6.3.3


Attachments:
len_th+2a3+2.6.33-rc6.patch (719.00 B)

2010-02-02 16:18:01

by William Allen Simpson

[permalink] [raw]
Subject: [PATCH 2/7] net: remove old tcp_optlen function

diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 65df1de..45452c5 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -6352,6 +6352,8 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
/* Called with netif_tx_lock.
* bnx2_tx_int() runs without netif_tx_lock unless it needs to call
* netif_wake_queue().
+ *
+ * No TCP or IP length checking, per David Miller (see commit log).
*/
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -6396,19 +6398,19 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
}
#endif
- if ((mss = skb_shinfo(skb)->gso_size)) {
- u32 tcp_opt_len;
- struct iphdr *iph;
+ mss = skb_shinfo(skb)->gso_size;
+ if (mss != 0) {
+ struct tcphdr *th = tcp_hdr(skb);
+ int tcp_opt_words = th->doff - (sizeof(*th) >> 2);
+ /* assumes positive tcp_opt_words without checking */

vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

- tcp_opt_len = tcp_optlen(skb);
-
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
u32 tcp_off = skb_transport_offset(skb) -
sizeof(struct ipv6hdr) - ETH_HLEN;

- vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
+ vlan_tag_flags |= (tcp_opt_words << 8) |
TX_BD_FLAGS_SW_FLAGS;
if (likely(tcp_off == 0))
vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
@@ -6421,14 +6423,15 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
}
} else {
- iph = ip_hdr(skb);
- if (tcp_opt_len || (iph->ihl > 5)) {
- vlan_tag_flags |= ((iph->ihl - 5) +
- (tcp_opt_len >> 2)) << 8;
- }
+ struct iphdr *iph = ip_hdr(skb);
+ int ip_opt_words = iph->ihl - (sizeof(*iph) >> 2);
+ /* assumes positive ip_opt_words without checking */
+ int opt_words = ip_opt_words + tcp_opt_words;
+
+ if (opt_words > 0)
+ vlan_tag_flags |= opt_words << 8;
}
- } else
- mss = 0;
+ }

mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(bp->pdev, mapping)) {
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 7f82b02..c20c800 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -5426,6 +5426,8 @@ static void tg3_set_txd(struct tg3_napi *tnapi, int entry,

/* hard_start_xmit for devices that don't have any bugs and
* support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
+ *
+ * No TCP or IP length checking, per David Miller (see commit log).
*/
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
struct net_device *dev)
@@ -5461,9 +5463,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,

entry = tnapi->tx_prod;
base_flags = 0;
- mss = 0;
- if ((mss = skb_shinfo(skb)->gso_size) != 0) {
- int tcp_opt_len, ip_tcp_len;
+ mss = skb_shinfo(skb)->gso_size;
+ if (mss != 0) {
+ struct tcphdr *th;
u32 hdrlen;

if (skb_header_cloned(skb) &&
@@ -5471,18 +5473,16 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
dev_kfree_skb(skb);
goto out_unlock;
}
+ th = tcp_hdr(skb);

if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
hdrlen = skb_headlen(skb) - ETH_HLEN;
else {
struct iphdr *iph = ip_hdr(skb);

- tcp_opt_len = tcp_optlen(skb);
- ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
-
+ hdrlen = ip_hdrlen(skb) + tcp_header_len_th(th);
+ iph->tot_len = htons(mss + hdrlen);
iph->check = 0;
- iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
- hdrlen = ip_tcp_len + tcp_opt_len;
}

if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
@@ -5496,7 +5496,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
base_flags |= (TXD_FLAG_CPU_PRE_DMA |
TXD_FLAG_CPU_POST_DMA);

- tcp_hdr(skb)->check = 0;
+ th->check = 0;

}
else if (skb->ip_summed == CHECKSUM_PARTIAL)
@@ -5629,6 +5629,8 @@ tg3_tso_bug_end:

/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
* support TG3_FLG2_HW_TSO_1 or firmware TSO only.
+ *
+ * No TCP or IP length checking, per David Miller (see commit log).
*/
static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
struct net_device *dev)
@@ -5668,20 +5670,21 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
if (skb->ip_summed == CHECKSUM_PARTIAL)
base_flags |= TXD_FLAG_TCPUDP_CSUM;

- if ((mss = skb_shinfo(skb)->gso_size) != 0) {
+ mss = skb_shinfo(skb)->gso_size;
+ if (mss != 0) {
struct iphdr *iph;
- u32 tcp_opt_len, ip_tcp_len, hdr_len;
+ struct tcphdr *th;
+ u32 hdr_len;
+ int opt_bytes;

if (skb_header_cloned(skb) &&
pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
dev_kfree_skb(skb);
goto out_unlock;
}
+ th = tcp_hdr(skb);
+ hdr_len = ip_hdrlen(skb) + tcp_header_len_th(th);

- tcp_opt_len = tcp_optlen(skb);
- ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
-
- hdr_len = ip_tcp_len + tcp_opt_len;
if (unlikely((ETH_HLEN + hdr_len) > 80) &&
(tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
return (tg3_tso_bug(tp, skb));
@@ -5693,13 +5696,14 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
iph->check = 0;
iph->tot_len = htons(mss + hdr_len);
if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
- tcp_hdr(skb)->check = 0;
+ th->check = 0;
base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
} else
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
+ th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ 0, IPPROTO_TCP, 0);
+
+ opt_bytes = hdr_len - sizeof(*iph) - sizeof(*th);
+ /* assumes positive opt_bytes without checking */

if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
mss |= (hdr_len & 0xc) << 12;
@@ -5710,19 +5714,11 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
mss |= hdr_len << 9;
else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
- if (tcp_opt_len || iph->ihl > 5) {
- int tsflags;
-
- tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
- mss |= (tsflags << 11);
- }
+ if (opt_bytes > 0)
+ mss |= opt_bytes << (11 - 2);
} else {
- if (tcp_opt_len || iph->ihl > 5) {
- int tsflags;
-
- tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
- base_flags |= tsflags << 12;
- }
+ if (opt_bytes > 0)
+ base_flags |= opt_bytes << (12 - 2);
}
}
#if TG3_VLAN_TAG_USED
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index d0133cf..74728f7 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -218,11 +218,6 @@ static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
return tcp_hdr(skb)->doff * 4;
}

-static inline unsigned int tcp_optlen(const struct sk_buff *skb)
-{
- return (tcp_hdr(skb)->doff - 5) * 4;
-}
-
/* Length of fixed header plus standard options. */
static inline unsigned int tcp_header_len_th(const struct tcphdr *th)
{
--
1.6.3.3


Attachments:
len_th+2b3+2.6.33-rc6.patch (6.77 kB)

2010-02-02 16:24:45

by William Allen Simpson

[permalink] [raw]
Subject: [PATCH v5 3/7] tcp: harmonize tcp_vx_rcv header length assumptions

diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 60c2770..81492a1 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -975,6 +975,13 @@ xfrm_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x, unsigned short
}

#ifdef CONFIG_XFRM
+/*
+ * For transport, the policy is checked before the presumed more expensive
+ * checksum. The transport header has already been checked for size, and is
+ * guaranteed to be contiguous. These policies must not alter the header or
+ * its position in the buffer, and should not shorten the buffer length
+ * without ensuring the length remains >= the header size.
+ */
extern int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb, unsigned short family);

static inline int __xfrm_policy_check2(struct sock *sk, int dir,
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 65b8ebf..0a76e41 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1559,7 +1559,8 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
}

- if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
+ /* Assumes header and options unchanged since checksum_init() */
+ if (tcp_checksum_complete(skb))
goto csum_err;

if (sk->sk_state == TCP_LISTEN) {
@@ -1601,14 +1602,13 @@ csum_err:
}

/*
- * From tcp_input.c
+ * Called by ip_input.c: ip_local_deliver_finish()
*/
-
int tcp_v4_rcv(struct sk_buff *skb)
{
- const struct iphdr *iph;
struct tcphdr *th;
struct sock *sk;
+ int tcp_header_len;
int ret;
struct net *net = dev_net(skb->dev);

@@ -1618,31 +1618,33 @@ int tcp_v4_rcv(struct sk_buff *skb)
/* Count it even if it's bad */
TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

+ /* Check too short header */
if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
goto discard_it;

- th = tcp_hdr(skb);
-
- if (th->doff < sizeof(struct tcphdr) / 4)
+ /* Check bad doff, compare doff directly to constant value */
+ tcp_header_len = tcp_hdr(skb)->doff;
+ if (tcp_header_len < (sizeof(struct tcphdr) / 4))
goto bad_packet;
- if (!pskb_may_pull(skb, th->doff * 4))
+
+ /* Check too short header and options */
+ tcp_header_len *= 4;
+ if (!pskb_may_pull(skb, tcp_header_len))
goto discard_it;

- /* An explanation is required here, I think.
- * Packet length and doff are validated by header prediction,
- * provided case of th->doff==0 is eliminated.
- * So, we defer the checks. */
+ /* Packet length and doff are validated by header prediction,
+ * provided case of th->doff == 0 is eliminated (above).
+ */
if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
goto bad_packet;

th = tcp_hdr(skb);
- iph = ip_hdr(skb);
TCP_SKB_CB(skb)->seq = ntohl(th->seq);
TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
- skb->len - th->doff * 4);
+ skb->len - tcp_header_len);
TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
TCP_SKB_CB(skb)->when = 0;
- TCP_SKB_CB(skb)->flags = iph->tos;
+ TCP_SKB_CB(skb)->flags = ip_hdr(skb)->tos;
TCP_SKB_CB(skb)->sacked = 0;

sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
@@ -1682,14 +1684,14 @@ process:
bh_unlock_sock(sk);

sock_put(sk);
-
return ret;

no_tcp_socket:
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard_it;

- if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
+ /* Assumes header and options unchanged since checksum_init() */
+ if (tcp_checksum_complete(skb)) {
bad_packet:
TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
} else {
@@ -1711,18 +1713,21 @@ do_time_wait:
goto discard_it;
}

- if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
+ /* Assumes header and options unchanged since checksum_init() */
+ if (tcp_checksum_complete(skb)) {
TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
inet_twsk_put(inet_twsk(sk));
goto discard_it;
}
+
switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
case TCP_TW_SYN: {
struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
&tcp_hashinfo,
- iph->daddr, th->dest,
+ ip_hdr(skb)->daddr,
+ th->dest,
inet_iif(skb));
- if (sk2) {
+ if (sk2 != NULL) {
inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
inet_twsk_put(inet_twsk(sk));
sk = sk2;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index febfd59..b76939a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1594,7 +1594,8 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
}

- if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
+ /* Assumes header and options unchanged since checksum_init() */
+ if (tcp_checksum_complete(skb))
goto csum_err;

if (sk->sk_state == TCP_LISTEN) {
@@ -1664,38 +1665,47 @@ ipv6_pktoptions:
return 0;
}

+/*
+ * Called by ip6_input.c: ip6_input_finish()
+ */
static int tcp_v6_rcv(struct sk_buff *skb)
{
struct tcphdr *th;
struct sock *sk;
+ int tcp_header_len;
int ret;
struct net *net = dev_net(skb->dev);

if (skb->pkt_type != PACKET_HOST)
goto discard_it;

- /*
- * Count it even if it's bad.
- */
+ /* Count it even if it's bad */
TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

+ /* Check too short header */
if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
goto discard_it;

- th = tcp_hdr(skb);
-
- if (th->doff < sizeof(struct tcphdr)/4)
+ /* Check bad doff, compare doff directly to constant value */
+ tcp_header_len = tcp_hdr(skb)->doff;
+ if (tcp_header_len < (sizeof(struct tcphdr) / 4))
goto bad_packet;
- if (!pskb_may_pull(skb, th->doff*4))
+
+ /* Check too short header and options */
+ tcp_header_len *= 4;
+ if (!pskb_may_pull(skb, tcp_header_len))
goto discard_it;

+ /* Packet length and doff are validated by header prediction,
+ * provided case of th->doff == 0 is eliminated (above).
+ */
if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
goto bad_packet;

th = tcp_hdr(skb);
TCP_SKB_CB(skb)->seq = ntohl(th->seq);
TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
- skb->len - th->doff*4);
+ skb->len - tcp_header_len);
TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
TCP_SKB_CB(skb)->when = 0;
TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
@@ -1711,6 +1721,7 @@ process:

if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;
+ /* nf_reset(skb); in ip6_input.c ip6_input_finish() */

if (sk_filter(sk, skb))
goto discard_and_relse;
@@ -1743,7 +1754,8 @@ no_tcp_socket:
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard_it;

- if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
+ /* Assumes header and options unchanged since checksum_init() */
+ if (tcp_checksum_complete(skb)) {
bad_packet:
TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
} else {
@@ -1751,11 +1763,7 @@ bad_packet:
}

discard_it:
-
- /*
- * Discard frame
- */
-
+ /* Discard frame. */
kfree_skb(skb);
return 0;

@@ -1769,24 +1777,23 @@ do_time_wait:
goto discard_it;
}

- if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
+ /* Assumes header and options unchanged since checksum_init() */
+ if (tcp_checksum_complete(skb)) {
TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
inet_twsk_put(inet_twsk(sk));
goto discard_it;
}

switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
- case TCP_TW_SYN:
- {
- struct sock *sk2;
-
- sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
- &ipv6_hdr(skb)->daddr,
- ntohs(th->dest), inet6_iif(skb));
+ case TCP_TW_SYN: {
+ struct sock *sk2 = inet6_lookup_listener(dev_net(skb->dev),
+ &tcp_hashinfo,
+ &ipv6_hdr(skb)->daddr,
+ ntohs(th->dest),
+ inet6_iif(skb));
if (sk2 != NULL) {
- struct inet_timewait_sock *tw = inet_twsk(sk);
- inet_twsk_deschedule(tw, &tcp_death_row);
- inet_twsk_put(tw);
+ inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
+ inet_twsk_put(inet_twsk(sk));
sk = sk2;
goto process;
}
--
1.6.3.3


Attachments:
len_th+2c5+2.6.33-rc6.patch (7.85 kB)

2010-02-02 16:28:21

by William Allen Simpson

[permalink] [raw]
Subject: [PATCH v4 4/7] tcp: input header length, prediction, and timestamp bugs

diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 74728f7..2987ee8 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -301,7 +301,11 @@ struct tcp_sock {

/*
* Header prediction flags
- * 0x5?10 << 16 + snd_wnd in net byte order
+ * S << 28 + TCP_FLAG_ACK + snd_wnd, in net byte order
+ * (PSH flag is ignored)
+ * S is 5 (no options), or 8 (timestamp aligned)
+ * otherwise, 0 to turn it off -- for instance, when there are
+ * holes in receive space.
*/
__be32 pred_flags;

diff --git a/include/net/tcp.h b/include/net/tcp.h
index 34f5cc2..6b0d7e9 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -310,13 +310,11 @@ extern int tcp_ioctl(struct sock *sk,

extern int tcp_rcv_state_process(struct sock *sk,
struct sk_buff *skb,
- struct tcphdr *th,
- unsigned len);
+ struct tcphdr *th);

extern int tcp_rcv_established(struct sock *sk,
struct sk_buff *skb,
- struct tcphdr *th,
- unsigned len);
+ struct tcphdr *th);

extern void tcp_rcv_space_adjust(struct sock *sk);

@@ -533,9 +531,16 @@ static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
return (tp->srtt >> 3) + tp->rttvar;
}

+static inline u16 __tcp_fast_path_header_length(const struct tcp_sock *tp)
+{
+ return tp->rx_opt.tstamp_ok
+ ? sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED
+ : sizeof(struct tcphdr);
+}
+
static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
- tp->pred_flags = htonl((tp->tcp_header_len << 26) |
+ tp->pred_flags = htonl((__tcp_fast_path_header_length(tp) << (28 - 2)) |
ntohl(TCP_FLAG_ACK) |
snd_wnd);
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 28e0296..8e0f6ae 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -152,7 +152,7 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
* tcp header plus fixed timestamp option length.
* Resulting "len" is MSS free of SACK jitter.
*/
- len -= tcp_sk(sk)->tcp_header_len;
+ len -= __tcp_fast_path_header_length(tcp_sk(sk));
icsk->icsk_ack.last_seg_size = len;
if (len == lss) {
icsk->icsk_ack.rcv_mss = len;
@@ -5206,7 +5206,7 @@ discard:
* tcp_data_queue when everything is OK.
*/
int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
- struct tcphdr *th, unsigned len)
+ struct tcphdr *th)
{
struct tcp_sock *tp = tcp_sk(sk);
int res;
@@ -5225,31 +5225,15 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
* extra cost of the net_bh soft interrupt processing...
* We do checksum and copy also but from device to kernel.
*/
-
- tp->rx_opt.saw_tstamp = 0;
-
- /* pred_flags is 0xS?10 << 16 + snd_wnd
- * if header_prediction is to be made
- * 'S' will always be tp->tcp_header_len >> 2
- * '?' will be 0 for the fast path, otherwise pred_flags is 0 to
- * turn it off (when there are holes in the receive
- * space for instance)
- * PSH flag is ignored.
- */
-
if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags &&
TCP_SKB_CB(skb)->seq == tp->rcv_nxt &&
!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) {
- int tcp_header_len = tp->tcp_header_len;
-
- /* Timestamp header prediction: tcp_header_len
- * is automatically equal to th->doff*4 due to pred_flags
- * match.
- */
+ int tcp_header_len = tcp_header_len_th(th);

- /* Check timestamp */
- if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) {
- /* No? Slow path! */
+ /* Timestamp header prediction */
+ if (tcp_header_len != sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) {
+ tp->rx_opt.saw_tstamp = 0; /* false */
+ } else {
if (!tcp_parse_aligned_timestamp(tp, th))
goto slow_path;

@@ -5264,35 +5248,12 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
*/
}

- if (len <= tcp_header_len) {
- /* Bulk data transfer: sender */
- if (len == tcp_header_len) {
- /* Predicted packet is in window by definition.
- * seq == rcv_nxt and rcv_wup <= rcv_nxt.
- * Hence, check seq<=rcv_wup reduces to:
- */
- if (tcp_header_len ==
- (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) &&
- tp->rcv_nxt == tp->rcv_wup)
- tcp_store_ts_recent(tp);
-
- /* We know that such packets are checksummed
- * on entry.
- */
- tcp_ack(sk, skb, 0);
- __kfree_skb(skb);
- tcp_data_snd_check(sk);
- return 0;
- } else { /* Header too small */
- TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
- goto discard;
- }
- } else {
+ if (tcp_header_len < skb->len) {
int eaten = 0;
int copied_early = 0;

if (tp->copied_seq == tp->rcv_nxt &&
- len - tcp_header_len <= tp->ucopy.len) {
+ skb->len - tcp_header_len <= tp->ucopy.len) {
#ifdef CONFIG_NET_DMA
if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
copied_early = 1;
@@ -5311,9 +5272,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
* seq == rcv_nxt and rcv_wup <= rcv_nxt.
* Hence, check seq<=rcv_wup reduces to:
*/
- if (tcp_header_len ==
- (sizeof(struct tcphdr) +
- TCPOLEN_TSTAMP_ALIGNED) &&
+ if (tp->rx_opt.saw_tstamp &&
tp->rcv_nxt == tp->rcv_wup)
tcp_store_ts_recent(tp);

@@ -5334,8 +5293,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
* seq == rcv_nxt and rcv_wup <= rcv_nxt.
* Hence, check seq<=rcv_wup reduces to:
*/
- if (tcp_header_len ==
- (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) &&
+ if (tp->rx_opt.saw_tstamp &&
tp->rcv_nxt == tp->rcv_wup)
tcp_store_ts_recent(tp);

@@ -5376,11 +5334,33 @@ no_ack:
else
sk->sk_data_ready(sk, 0);
return 0;
+ } else {
+ /* Bulk data transfer: sender
+ *
+ * tcp_header_len > skb->len never happens,
+ * already checked by tcp_v[4,6]_rcv()
+ *
+ * Predicted packet is in window by definition.
+ * seq == rcv_nxt and rcv_wup <= rcv_nxt.
+ * Hence, check seq<=rcv_wup reduces to:
+ */
+ if (tp->rx_opt.saw_tstamp &&
+ tp->rcv_nxt == tp->rcv_wup)
+ tcp_store_ts_recent(tp);
+
+ /* We know that such packets are checksummed
+ * on entry.
+ */
+ tcp_ack(sk, skb, 0);
+ __kfree_skb(skb);
+ tcp_data_snd_check(sk);
+ return 0;
}
}

slow_path:
- if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
+ /* Assumes header and options unchanged since checksum_init() */
+ if (tcp_checksum_complete_user(sk, skb))
goto csum_error;

/*
@@ -5416,7 +5396,7 @@ discard:
}

static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
- struct tcphdr *th, unsigned len)
+ struct tcphdr *th)
{
u8 *hash_location;
struct inet_connection_sock *icsk = inet_csk(sk);
@@ -5693,7 +5673,7 @@ reset_and_undo:
*/

int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
- struct tcphdr *th, unsigned len)
+ struct tcphdr *th)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
@@ -5740,7 +5720,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
goto discard;

case TCP_SYN_SENT:
- queued = tcp_rcv_synsent_state_process(sk, skb, th, len);
+ queued = tcp_rcv_synsent_state_process(sk, skb, th);
if (queued >= 0)
return queued;

diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 0a76e41..f999e06 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1551,7 +1551,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)

if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
TCP_CHECK_TIMER(sk);
- if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
+ if (tcp_rcv_established(sk, skb, tcp_hdr(skb))) {
rsk = sk;
goto reset;
}
@@ -1578,7 +1578,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
}

TCP_CHECK_TIMER(sk);
- if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
+ if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb))) {
rsk = sk;
goto reset;
}
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index f206ee5..37b7536 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -718,8 +718,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
int state = child->sk_state;

if (!sock_owned_by_user(child)) {
- ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb),
- skb->len);
+ ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb));
/* Wakeup parent, send SIGIO */
if (state == TCP_SYN_RECV && child->sk_state != state)
parent->sk_data_ready(parent, 0);
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index 9bc805d..de2a32e 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -88,7 +88,7 @@ static inline int tcp_probe_avail(void)
* Note: arguments must match tcp_rcv_established()!
*/
static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
- struct tcphdr *th, unsigned len)
+ struct tcphdr *th)
{
const struct tcp_sock *tp = tcp_sk(sk);
const struct inet_sock *inet = inet_sk(sk);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index b76939a..3d08a4d 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1586,7 +1586,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)

if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
TCP_CHECK_TIMER(sk);
- if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
+ if (tcp_rcv_established(sk, skb, tcp_hdr(skb)))
goto reset;
TCP_CHECK_TIMER(sk);
if (opt_skb)
@@ -1618,7 +1618,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
}

TCP_CHECK_TIMER(sk);
- if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
+ if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb)))
goto reset;
TCP_CHECK_TIMER(sk);
if (opt_skb)
--
1.6.3.3


Attachments:
len_th+2d4+2.6.33-rc6.patch (9.70 kB)

2010-02-02 16:33:30

by William Allen Simpson

[permalink] [raw]
Subject: [PATCH v3 5/7] TCPCT part 2e: accept SYNACK data

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8e0f6ae..165040e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5395,6 +5395,12 @@ discard:
return 0;
}

+/*
+ * Returns:
+ * +1 on reset,
+ * 0 success and/or SYNACK data,
+ * -1 on discard.
+ */
static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
struct tcphdr *th)
{
@@ -5403,6 +5409,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_cookie_values *cvp = tp->cookie_values;
int saved_clamp = tp->rx_opt.mss_clamp;
+ int queued = 0;

tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0);

@@ -5509,6 +5516,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
- TCPOLEN_COOKIE_BASE;
int cookie_pair_size = cookie_size
+ cvp->cookie_desired;
+ int tcp_header_len = tcp_header_len_th(th);

/* A cookie extension option was sent and returned.
* Note that each incoming SYNACK replaces the
@@ -5524,6 +5532,19 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
hash_location, cookie_size);
cvp->cookie_pair_size = cookie_pair_size;
}
+
+ queued = skb->len - tcp_header_len;
+ if (queued > 0) {
+ /* Queue incoming transaction data. */
+ __skb_pull(skb, tcp_header_len);
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
+ skb_set_owner_r(skb, sk);
+ sk->sk_data_ready(sk, 0);
+ cvp->s_data_in = 1; /* true */
+ tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+ tp->rcv_wup = TCP_SKB_CB(skb)->end_seq;
+ tp->copied_seq = TCP_SKB_CB(skb)->seq + 1;
+ }
}

smp_mb();
@@ -5577,11 +5598,14 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
TCP_DELACK_MAX, TCP_RTO_MAX);

discard:
- __kfree_skb(skb);
+ if (queued <= 0)
+ __kfree_skb(skb);
return 0;
} else {
tcp_send_ack(sk);
}
+ if (queued > 0)
+ return 0;
return -1;
}

--
1.6.3.3


Attachments:
TCPCT+2e3+2.6.33-rc6.patch (2.01 kB)

2010-02-02 16:41:33

by William Allen Simpson

[permalink] [raw]
Subject: [PATCH v5 7/7] TCPCT part 2g: parse cookie pair and 64-bit timestamp

diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 2987ee8..b71be6c 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -260,13 +260,21 @@ struct tcp_options_received {
u8 num_sacks; /* Number of SACK blocks */
u16 user_mss; /* mss requested by user in ioctl */
u16 mss_clamp; /* Maximal mss, negotiated at connection setup */
+
+ /* When the options are extended beyond the maximum 40 bytes,
+ * then this holds the additional data offset (in 32-bit words).
+ */
+ u16 extended:12, /* Up to 3,315 = 13 (40/3) by 255 */
+ saw_tstamp64:1, /* Seen on recent packet */
+ tstamp64_ok:1, /* Verified with cookie pair */
+ __unused:2;
};

static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
{
rx_opt->tstamp_ok = rx_opt->sack_ok = 0;
rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
- rx_opt->cookie_plus = 0;
+ rx_opt->tstamp64_ok = 0;
}

/* This is the max number of SACKS that we'll generate and process. It's safe
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 420e872..157c97b 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -156,9 +156,8 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
/*
* TCP option
*/
-
-#define TCPOPT_NOP 1 /* Padding */
#define TCPOPT_EOL 0 /* End of options */
+#define TCPOPT_NOP 1 /* Padding */
#define TCPOPT_MSS 2 /* Segment size negotiating */
#define TCPOPT_WINDOW 3 /* Window scaling */
#define TCPOPT_SACK_PERM 4 /* SACK Permitted */
@@ -166,30 +165,32 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */
#define TCPOPT_COOKIE 253 /* Cookie extension (experimental) */
-
-/*
- * TCP option lengths
- */
-
-#define TCPOLEN_MSS 4
-#define TCPOLEN_WINDOW 3
-#define TCPOLEN_SACK_PERM 2
-#define TCPOLEN_TIMESTAMP 10
-#define TCPOLEN_MD5SIG 18
-#define TCPOLEN_COOKIE_BASE 2 /* Cookie-less header extension */
-#define TCPOLEN_COOKIE_PAIR 3 /* Cookie pair header extension */
-#define TCPOLEN_COOKIE_MIN (TCPOLEN_COOKIE_BASE+TCP_COOKIE_MIN)
-#define TCPOLEN_COOKIE_MAX (TCPOLEN_COOKIE_BASE+TCP_COOKIE_MAX)
-
-/* But this is what stacks really send out. */
-#define TCPOLEN_TSTAMP_ALIGNED 12
+#define TCPOPT_TSTAMP64 254 /* 64-bit extension (experimental) */
+
+/* TCP option lengths (same order as above) */
+#define TCPOLEN_MSS 4
+#define TCPOLEN_WINDOW 3
+#define TCPOLEN_SACK_PERM 2
+#define TCPOLEN_SACK_BASE 2
+#define TCPOLEN_SACK_PERBLOCK 8
+#define TCPOLEN_TIMESTAMP 10
+#define TCPOLEN_MD5SIG 18
+#define TCPOLEN_COOKIE_BASE 2 /* Cookie-less header extension */
+#define TCPOLEN_COOKIE_PAIR 3 /* Cookie pair header extension */
+#define TCPOLEN_COOKIE_MIN (TCPOLEN_COOKIE_BASE+TCP_COOKIE_MIN)
+#define TCPOLEN_COOKIE_MAX (TCPOLEN_COOKIE_BASE+TCP_COOKIE_MAX)
+#define TCPOLEN_TSTAMP64 3
+
+/* TCP options 32-bit aligned (same order as above) */
+#define TCPOLEN_MSS_ALIGNED 4
#define TCPOLEN_WSCALE_ALIGNED 4
#define TCPOLEN_SACKPERM_ALIGNED 4
-#define TCPOLEN_SACK_BASE 2
#define TCPOLEN_SACK_BASE_ALIGNED 4
-#define TCPOLEN_SACK_PERBLOCK 8
+#define TCPOLEN_TSTAMP_ALIGNED 12
#define TCPOLEN_MD5SIG_ALIGNED 20
-#define TCPOLEN_MSS_ALIGNED 4
+
+/* TCP option extensions (same order as above) */
+#define TCPOEXT_TSTAMP64 16

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d3c6c7a..df38cef 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3722,9 +3722,71 @@ old_ack:
return 0;
}

+/* Process option extension data.
+ *
+ * Extension data in nonlinear skb is Not Yet Implemented!!!
+ *
+ * Returns:
+ * 0 on success
+ * negative value on failure
+ */
+int tcp_parse_extension(struct sk_buff *skb, const struct tcphdr *th,
+ struct tcp_options_received *opt_rx, u8 **hvpp)
+{
+ __be32 *tsp = (__be32 *)th + th->doff;
+ int remainder = skb_headlen(skb);
+
+ if (unlikely(th->syn)) {
+ /* Extended options are ignored on SYN or SYNACK, just as other
+ * malformed or unrecognized options. Leave the data in place.
+ */
+ opt_rx->extended = 0;
+ return 0;
+ }
+
+ /* Adjust end_seq, set in tcp_v[4,6]_rcv() */
+ TCP_SKB_CB(skb)->end_seq -= (opt_rx->extended * 4);
+
+ /* If present, always first, aligned */
+ if (opt_rx->saw_tstamp64) {
+ if (unlikely(remainder < TCPOEXT_TSTAMP64)) {
+ /* insufficient data */
+ opt_rx->saw_tstamp64 = 0 /* false */;
+ opt_rx->saw_tstamp = 0 /* false */;
+ } else {
+ /* 64-bits not yet implemented */
+ tsp++;
+ opt_rx->rcv_tsval = ntohl(*tsp);
+ tsp += 2;
+ opt_rx->rcv_tsecr = ntohl(*tsp);
+ tsp++;
+ }
+ remainder -= TCPOEXT_TSTAMP64;
+ }
+
+ /* If present, TCPOLEN_COOKIE_PAIR makes this an odd value */
+ if (opt_rx->cookie_plus & 0x1) {
+ int cookie_size = opt_rx->cookie_plus - TCPOLEN_COOKIE_PAIR;
+
+ if (unlikely(remainder < cookie_size)) {
+ /* insufficient data */
+ opt_rx->cookie_plus = 0;
+ } else {
+ *hvpp = (u8 *)tsp;
+ tsp += (cookie_size / 4);
+ }
+ remainder -= cookie_size;
+ }
+ return (remainder < 0) ? remainder : 0;
+}
+
/* Look for tcp options. Normally only called on SYN and SYNACK packets.
* But, this can also be called on packets in the established flow when
* the fast version below fails.
+ *
+ * Returns:
+ * 0 on success
+ * negative value on failure
*/
int tcp_parse_options(struct sk_buff *skb, const struct tcphdr *th,
struct tcp_options_received *opt_rx, u8 **hvpp, int estab)
@@ -3733,6 +3795,8 @@ int tcp_parse_options(struct sk_buff *skb, const struct tcphdr *th,
int length = tcp_option_len_th(th);

opt_rx->cookie_plus = 0;
+ opt_rx->extended = 0;
+ opt_rx->saw_tstamp64 = 0; /* false */
opt_rx->saw_tstamp = 0; /* false */

while (length > 0) {
@@ -3741,6 +3805,9 @@ int tcp_parse_options(struct sk_buff *skb, const struct tcphdr *th,

switch (opcode) {
case TCPOPT_EOL:
+ if (opt_rx->extended > 0)
+ return tcp_parse_extension(skb, th, opt_rx,
+ hvpp);
return 0;
case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */
length--;
@@ -3753,6 +3820,9 @@ int tcp_parse_options(struct sk_buff *skb, const struct tcphdr *th,
opsize = *ptr++;
if (opsize < 2 || opsize > length) {
/* don't parse partial options */
+ if (opt_rx->extended > 0)
+ return tcp_parse_extension(skb, th, opt_rx,
+ hvpp);
return 0;
}

@@ -3829,7 +3899,16 @@ int tcp_parse_options(struct sk_buff *skb, const struct tcphdr *th,
/* not yet implemented */
break;
case TCPOLEN_COOKIE_PAIR:
- /* not yet implemented */
+ if (*ptr >= (TCPOLEN_COOKIE_MIN / 4) &&
+ *ptr <= (TCPOLEN_COOKIE_MAX / 4) &&
+ !th->syn && opt_rx->saw_tstamp &&
+ opt_rx->cookie_plus == 0 &&
+ (opt_rx->extended == 0 ||
+ (opt_rx->extended == (TCPOEXT_TSTAMP64 / 4) &&
+ opt_rx->saw_tstamp64))) {
+ opt_rx->cookie_plus = opsize + *ptr * 4;
+ }
+ opt_rx->extended += *ptr;
break;
case TCPOLEN_COOKIE_MIN+0:
case TCPOLEN_COOKIE_MIN+2:
@@ -3849,6 +3928,18 @@ int tcp_parse_options(struct sk_buff *skb, const struct tcphdr *th,
};
break;

+ case TCPOPT_TSTAMP64:
+ if (opsize == TCPOLEN_TSTAMP64) {
+ if (*ptr == (TCPOEXT_TSTAMP64 / 4) &&
+ !th->syn && !opt_rx->saw_tstamp &&
+ opt_rx->extended == 0) {
+ opt_rx->saw_tstamp64 = 1; /* true */
+ opt_rx->saw_tstamp = 1; /* true */
+ }
+ opt_rx->extended += *ptr;
+ }
+ break;
+
default:
/* skip unrecognized options */
break;
@@ -3857,6 +3948,8 @@ int tcp_parse_options(struct sk_buff *skb, const struct tcphdr *th,
ptr += opsize - 2;
length -= opsize;
}
+ if (opt_rx->extended > 0)
+ return tcp_parse_extension(skb, th, opt_rx, hvpp);
return 0;
}

@@ -3883,6 +3976,11 @@ static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th)

/* Fast parse options. This hopes to only see timestamps.
* If it is wrong it falls back on tcp_parse_options().
+ *
+ * Returns:
+ * 1 on success, fast
+ * 0 on success, slow
+ * negative value on failure
*/
static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
struct tcp_sock *tp, u8 **hvpp)
@@ -3892,11 +3990,14 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
*/
if (th->doff == (sizeof(*th) / 4)) {
tp->rx_opt.saw_tstamp = 0;
+ tp->rx_opt.extended = 0;
return 0;
- } else if (tp->rx_opt.tstamp_ok &&
- th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) {
- if (tcp_parse_aligned_timestamp(tp, th))
- return 1;
+ }
+ if (th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4) &&
+ tp->rx_opt.tstamp_ok &&
+ tcp_parse_aligned_timestamp(tp, th)) {
+ tp->rx_opt.extended = 0;
+ return 1;
}
return tcp_parse_options(skb, th, &tp->rx_opt, hvpp, 1);
}
@@ -3907,8 +4008,8 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
*/
u8 *tcp_parse_md5sig_option(struct tcphdr *th)
{
- int length = (th->doff << 2) - sizeof (*th);
u8 *ptr = (u8*)(th + 1);
+ int length = tcp_option_len_th(th);

/* If the TCP option is too short, we can short cut */
if (length < TCPOLEN_MD5SIG)
@@ -4373,7 +4474,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
goto drop;

- __skb_pull(skb, th->doff * 4);
+ __skb_pull(skb, (th->doff + tp->rx_opt.extended) * 4);

TCP_ECN_accept_cwr(tp, skb);

@@ -5034,8 +5135,8 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th)

/* Do we wait for any urgent data? - normally not... */
if (tp->urg_data == TCP_URG_NOTYET) {
- u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) -
- th->syn;
+ u32 ptr = ((th->doff + tp->rx_opt.extended) * 4)
+ + tp->urg_seq - ntohl(th->seq) - th->syn;

/* Is the urgent pointer pointing into this packet? */
if (ptr < skb->len) {
--
1.6.3.3


Attachments:
TCPCT+2g5+2.6.33-rc6.patch (9.79 kB)

2010-02-02 16:43:09

by William Allen Simpson

[permalink] [raw]
Subject: [PATCH v3 6/7] TCPCT part 2f: cleanup tcp_parse_options

diff --git a/include/net/tcp.h b/include/net/tcp.h
index 6b0d7e9..420e872 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -403,7 +403,8 @@ extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
size_t len, int nonblock,
int flags, int *addr_len);

-extern void tcp_parse_options(struct sk_buff *skb,
+extern int tcp_parse_options(struct sk_buff *skb,
+ const struct tcphdr *th,
struct tcp_options_received *opt_rx,
u8 **hvpp,
int estab);
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 66fd80e..3bed530 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -254,6 +254,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
struct ip_options *opt)
{
struct tcp_options_received tcp_opt;
+ int parsed;
u8 *hash_location;
struct inet_request_sock *ireq;
struct tcp_request_sock *treq;
@@ -279,7 +280,9 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,

/* check for timestamp cookie support */
memset(&tcp_opt, 0, sizeof(tcp_opt));
- tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
+ parsed = tcp_parse_options(skb, th, &tcp_opt, &hash_location, 0);
+ if (parsed < 0)
+ goto out;

if (tcp_opt.saw_tstamp)
cookie_check_timestamp(&tcp_opt);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 165040e..d3c6c7a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3726,15 +3726,14 @@ old_ack:
* But, this can also be called on packets in the established flow when
* the fast version below fails.
*/
-void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
- u8 **hvpp, int estab)
+int tcp_parse_options(struct sk_buff *skb, const struct tcphdr *th,
+ struct tcp_options_received *opt_rx, u8 **hvpp, int estab)
{
- unsigned char *ptr;
- struct tcphdr *th = tcp_hdr(skb);
- int length = (th->doff * 4) - sizeof(struct tcphdr);
+ unsigned char *ptr = (unsigned char *)(th + 1);
+ int length = tcp_option_len_th(th);

- ptr = (unsigned char *)(th + 1);
- opt_rx->saw_tstamp = 0;
+ opt_rx->cookie_plus = 0;
+ opt_rx->saw_tstamp = 0; /* false */

while (length > 0) {
int opcode = *ptr++;
@@ -3742,106 +3741,130 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,

switch (opcode) {
case TCPOPT_EOL:
- return;
+ return 0;
case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */
length--;
continue;
default:
- opsize = *ptr++;
- if (opsize < 2) /* "silly options" */
- return;
- if (opsize > length)
- return; /* don't parse partial options */
- switch (opcode) {
- case TCPOPT_MSS:
- if (opsize == TCPOLEN_MSS && th->syn && !estab) {
- u16 in_mss = get_unaligned_be16(ptr);
- if (in_mss) {
- if (opt_rx->user_mss &&
- opt_rx->user_mss < in_mss)
- in_mss = opt_rx->user_mss;
- opt_rx->mss_clamp = in_mss;
- }
- }
- break;
- case TCPOPT_WINDOW:
- if (opsize == TCPOLEN_WINDOW && th->syn &&
- !estab && sysctl_tcp_window_scaling) {
- __u8 snd_wscale = *(__u8 *)ptr;
- opt_rx->wscale_ok = 1;
- if (snd_wscale > 14) {
- if (net_ratelimit())
- printk(KERN_INFO "tcp_parse_options: Illegal window "
- "scaling value %d >14 received.\n",
- snd_wscale);
- snd_wscale = 14;
- }
- opt_rx->snd_wscale = snd_wscale;
- }
- break;
- case TCPOPT_TIMESTAMP:
- if ((opsize == TCPOLEN_TIMESTAMP) &&
- ((estab && opt_rx->tstamp_ok) ||
- (!estab && sysctl_tcp_timestamps))) {
- opt_rx->saw_tstamp = 1;
- opt_rx->rcv_tsval = get_unaligned_be32(ptr);
- opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
- }
- break;
- case TCPOPT_SACK_PERM:
- if (opsize == TCPOLEN_SACK_PERM && th->syn &&
- !estab && sysctl_tcp_sack) {
- opt_rx->sack_ok = 1;
- tcp_sack_reset(opt_rx);
+ /* fallthru */
+ break;
+ };
+
+ opsize = *ptr++;
+ if (opsize < 2 || opsize > length) {
+ /* don't parse partial options */
+ return 0;
+ }
+
+ switch (opcode) {
+ case TCPOPT_MSS:
+ if (opsize == TCPOLEN_MSS && th->syn && !estab) {
+ u16 in_mss = get_unaligned_be16(ptr);
+ if (in_mss) {
+ if (opt_rx->user_mss &&
+ opt_rx->user_mss < in_mss)
+ in_mss = opt_rx->user_mss;
+ opt_rx->mss_clamp = in_mss;
}
- break;
+ }
+ break;

- case TCPOPT_SACK:
- if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) &&
- !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) &&
- opt_rx->sack_ok) {
- TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
+ case TCPOPT_WINDOW:
+ if (opsize == TCPOLEN_WINDOW && th->syn &&
+ !estab && sysctl_tcp_window_scaling) {
+ __u8 snd_wscale = *(__u8 *)ptr;
+ opt_rx->wscale_ok = 1;
+ if (snd_wscale > 14) {
+ if (net_ratelimit())
+ printk(KERN_INFO
+ "tcp_parse_options: "
+ "window scaling value "
+ "%d > 14 received.\n",
+ snd_wscale);
+ snd_wscale = 14;
}
- break;
+ opt_rx->snd_wscale = snd_wscale;
+ }
+ break;
+
+ case TCPOPT_SACK_PERM:
+ if (opsize == TCPOLEN_SACK_PERM && th->syn &&
+ !estab && sysctl_tcp_sack) {
+ opt_rx->sack_ok = 1;
+ tcp_sack_reset(opt_rx);
+ }
+ break;
+
+ case TCPOPT_SACK:
+ if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) &&
+ !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) &&
+ opt_rx->sack_ok) {
+ TCP_SKB_CB(skb)->sacked = (ptr - 2)
+ - (unsigned char *)th;
+ }
+ break;
+
+ case TCPOPT_TIMESTAMP:
+ if ((opsize == TCPOLEN_TIMESTAMP) &&
+ ((estab && opt_rx->tstamp_ok) ||
+ (!estab && sysctl_tcp_timestamps))) {
+ opt_rx->saw_tstamp = 1;
+ opt_rx->rcv_tsval = get_unaligned_be32(ptr);
+ opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
+ }
+ break;
#ifdef CONFIG_TCP_MD5SIG
- case TCPOPT_MD5SIG:
- /*
- * The MD5 Hash has already been
- * checked (see tcp_v{4,6}_do_rcv()).
- */
- break;
+ case TCPOPT_MD5SIG:
+ /*
+ * The MD5 Hash has already been
+ * checked (see tcp_v{4,6}_do_rcv()).
+ */
+ break;
#endif
- case TCPOPT_COOKIE:
- /* This option is variable length.
- */
- switch (opsize) {
- case TCPOLEN_COOKIE_BASE:
- /* not yet implemented */
- break;
- case TCPOLEN_COOKIE_PAIR:
- /* not yet implemented */
- break;
- case TCPOLEN_COOKIE_MIN+0:
- case TCPOLEN_COOKIE_MIN+2:
- case TCPOLEN_COOKIE_MIN+4:
- case TCPOLEN_COOKIE_MIN+6:
- case TCPOLEN_COOKIE_MAX:
- /* 16-bit multiple */
+ case TCPOPT_COOKIE:
+ /* This option is variable length.
+ */
+ switch (opsize) {
+ case TCPOLEN_COOKIE_BASE:
+ /* not yet implemented */
+ break;
+ case TCPOLEN_COOKIE_PAIR:
+ /* not yet implemented */
+ break;
+ case TCPOLEN_COOKIE_MIN+0:
+ case TCPOLEN_COOKIE_MIN+2:
+ case TCPOLEN_COOKIE_MIN+4:
+ case TCPOLEN_COOKIE_MIN+6:
+ case TCPOLEN_COOKIE_MAX:
+ /* 16-bit multiple */
+ if (th->syn && opt_rx->saw_tstamp &&
+ opt_rx->cookie_plus == 0) {
opt_rx->cookie_plus = opsize;
*hvpp = ptr;
- default:
- /* ignore option */
- break;
- };
+ }
+ break;
+ default:
+ /* ignore option */
break;
};
+ break;

- ptr += opsize-2;
- length -= opsize;
- }
+ default:
+ /* skip unrecognized options */
+ break;
+ };
+
+ ptr += opsize - 2;
+ length -= opsize;
}
+ return 0;
}

+/*
+ * Returns:
+ * 1 if the aligned timestamp option was recognized and parsed
+ * 0 otherwise (caller falls back to the full option parser)
+ */
static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th)
{
__be32 *ptr = (__be32 *)(th + 1);
@@ -3875,8 +3898,7 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
if (tcp_parse_aligned_timestamp(tp, th))
return 1;
}
- tcp_parse_options(skb, &tp->rx_opt, hvpp, 1);
- return 1;
+ return tcp_parse_options(skb, th, &tp->rx_opt, hvpp, 1);
}

#ifdef CONFIG_TCP_MD5SIG
@@ -5127,10 +5149,13 @@ static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
{
u8 *hash_location;
struct tcp_sock *tp = tcp_sk(sk);
+ int parsed = tcp_fast_parse_options(skb, th, tp, &hash_location);
+
+ if (parsed < 0)
+ goto discard;

/* RFC1323: H1. Apply PAWS check first. */
- if (tcp_fast_parse_options(skb, th, tp, &hash_location) &&
- tp->rx_opt.saw_tstamp &&
+ if (tp->rx_opt.saw_tstamp &&
tcp_paws_discard(sk, skb)) {
if (!th->rst) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
@@ -5410,8 +5435,10 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
struct tcp_cookie_values *cvp = tp->cookie_values;
int saved_clamp = tp->rx_opt.mss_clamp;
int queued = 0;
+ int parsed = tcp_parse_options(skb, th, &tp->rx_opt, &hash_location, 0);

- tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0);
+ if (parsed < 0)
+ goto discard;

if (th->ack) {
/* rfc793:
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index f999e06..3f0813f 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1215,6 +1215,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
struct tcp_extend_values tmp_ext;
struct tcp_options_received tmp_opt;
+ int parsed;
u8 *hash_location;
struct request_sock *req;
struct inet_request_sock *ireq;
@@ -1265,7 +1266,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
tcp_clear_options(&tmp_opt);
tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
tmp_opt.user_mss = tp->rx_opt.user_mss;
- tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
+ parsed = tcp_parse_options(skb, tcp_hdr(skb), &tmp_opt, &hash_location,
+ 0);
+ if (parsed < 0)
+ goto drop_and_free;

if (tmp_opt.cookie_plus > 0 &&
tmp_opt.saw_tstamp &&
@@ -1278,7 +1282,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
- goto drop_and_release;
+ goto drop_and_free;

/* Secret recipe starts with IP addresses */
*mess++ ^= daddr;
@@ -1299,7 +1303,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
tmp_ext.cookie_out_never = 1; /* true */
tmp_ext.cookie_plus = 0;
} else {
- goto drop_and_release;
+ goto drop_and_free;
}
tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 37b7536..0d42635 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -97,9 +97,12 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,

tmp_opt.saw_tstamp = 0;
if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
- tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
+ int parsed = tcp_parse_options(skb, th, &tmp_opt,
+ &hash_location, 0);

- if (tmp_opt.saw_tstamp) {
+ if (parsed < 0) {
+ paws_reject = 1; /* true */
+ } else if (tmp_opt.saw_tstamp) {
tmp_opt.ts_recent = tcptw->tw_ts_recent;
tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
@@ -528,9 +531,12 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,

tmp_opt.saw_tstamp = 0;
if (th->doff > (sizeof(struct tcphdr)>>2)) {
- tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
+ int parsed = tcp_parse_options(skb, th, &tmp_opt,
+ &hash_location, 0);

- if (tmp_opt.saw_tstamp) {
+ if (parsed < 0) {
+ paws_reject = 1; /* true */
+ } else if (tmp_opt.saw_tstamp) {
tmp_opt.ts_recent = req->ts_recent;
/* We do not store true stamp, but it is not required,
* it can be estimated (approximately)
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 7208a06..3072500 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -160,6 +160,7 @@ static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
{
struct tcp_options_received tcp_opt;
+ int parsed;
u8 *hash_location;
struct inet_request_sock *ireq;
struct inet6_request_sock *ireq6;
@@ -187,7 +188,9 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)

/* check for timestamp cookie support */
memset(&tcp_opt, 0, sizeof(tcp_opt));
- tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
+ parsed = tcp_parse_options(skb, th, &tcp_opt, &hash_location, 0);
+ if (parsed < 0)
+ goto out;

if (tcp_opt.saw_tstamp)
cookie_check_timestamp(&tcp_opt);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 3d08a4d..e15e4f6 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1164,6 +1164,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
struct tcp_extend_values tmp_ext;
struct tcp_options_received tmp_opt;
+ int parsed;
u8 *hash_location;
struct request_sock *req;
struct inet6_request_sock *treq;
@@ -1207,7 +1208,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
tcp_clear_options(&tmp_opt);
tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
tmp_opt.user_mss = tp->rx_opt.user_mss;
- tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
+ parsed = tcp_parse_options(skb, tcp_hdr(skb), &tmp_opt, &hash_location,
+ 0);
+ if (parsed < 0)
+ goto drop_and_free;

if (tmp_opt.cookie_plus > 0 &&
tmp_opt.saw_tstamp &&
--
1.6.3.3


Attachments:
TCPCT+2f3+2.6.33-rc6.patch (13.21 kB)