Commit ebc50b26 authored by Paul

new case study: tcp_input.c

parent b82d8cfa
@@ -58,3 +58,4 @@
-Q _build/default/tutorial/proofs/lithium refinedc.tutorial.lithium
-Q _build/default/linux/casestudies/mt7601u/proofs/mac refinedc.linux.casestudies.mt7601u.mac
-Q _build/default/linux/casestudies/proofs/x86_pgtable refinedc.linux.casestudies.x86_pgtable
-Q _build/default/linux/casestudies/proofs/tcp_input refinedc.linux.casestudies.tcp_input
linux/casestudies/tcp_input.c

#include <stdbool.h>
#include "bits.h"
// include/net/sock.h
struct sock {
        // we don't care about the actual fields of this struct
        int dummy;
};
// include/linux/skbuff.h
struct sk_buff {
        // we don't care about the actual fields of this struct
        int dummy;
};
// include/uapi/linux/tcp.h
struct tcphdr {
        bool cwr;
        bool ece;
        bool syn;
};
// include/linux/tcp.h
struct tcp_sock {
        u8 ecn_flags;   /* ECN status bits. */
};
static inline struct tcp_sock *tcp_sk(const struct sock *sk);
static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb);
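/* In the kernel, tcp_sk() and tcp_hdr() are accessors recovering the
 * tcp_sock embedded in a sock and the TCP header carried by an skb
 * (tcp_sk() is just a cast of the sock pointer). Both structs are stubbed
 * out above, so the accessors are left as uninterpreted declarations for
 * the purposes of this case study. */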
// include/net/tcp.h
#define TCP_ECN_OK BIT(0)
#define TCP_ECN_QUEUE_CWR BIT(1)
#define TCP_ECN_DEMAND_CWR BIT(2)
#define TCP_ECN_SEEN BIT(3)
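/* A note on the intended meaning of these flags (our reading of how the
 * helpers below use them, not a comment from the kernel sources):
 * TCP_ECN_OK means ECN was negotiated for this connection,
 * TCP_ECN_QUEUE_CWR means a CWR notification still has to be sent,
 * TCP_ECN_DEMAND_CWR means keep echoing ECE until the peer answers with
 * CWR, and TCP_ECN_SEEN means an ECT codepoint was seen on a segment. */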
/* This is what the send packet queuing engine uses to pass
* TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers in here.
* This is 44 bytes if IPV6 is enabled.
* If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
*/
struct tcp_skb_cb {
        u32 seq;         /* Starting sequence number */
        u32 end_seq;     /* SEQ + FIN + SYN + datalen */
        u8 ip_dsfield;   /* IPv4 tos or IPv6 dsfield */
};
static inline struct tcp_skb_cb *TCP_SKB_CB(const struct sk_buff *skb);
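/* In the kernel, TCP_SKB_CB is a macro that reinterprets the skb's control
 * buffer: ((struct tcp_skb_cb *)&((__skb)->cb[0])). Since sk_buff is
 * stubbed out here, it is kept as an uninterpreted declaration instead. */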
// include/net/inet_connection_sock.h
/** inet_connection_sock - INET connection oriented sock
* @icsk_ack: Delayed ACK control data
*/
struct inet_connection_sock {
        struct {
                u8 pending;             /* ACK is pending */
                // __u8 quick;          /* Scheduled number of quick acks */
                // __u8 pingpong;       /* The session is interactive */
                // __u8 retry;          /* Number of attempts */
                // __u32 ato;           /* Predicted tick of soft clock */
                // unsigned long timeout;       /* Currently scheduled timeout */
                // __u32 lrcvtime;      /* timestamp of last received data packet */
                // __u16 last_seg_size; /* Size of last incoming segment */
                // __u16 rcv_mss;       /* MSS used for delayed ACK decisions */
        } icsk_ack;
};
static inline struct inet_connection_sock *inet_csk(const struct sock *sk);
// include/net/inet_ecn.h
enum {
        INET_ECN_NOT_ECT = 0,
        INET_ECN_ECT_1 = 1,
        INET_ECN_ECT_0 = 2,
        INET_ECN_CE = 3,
        INET_ECN_MASK = 3,
};
enum inet_csk_ack_state_t {
        ICSK_ACK_SCHED = 1,
        ICSK_ACK_TIMER = 2,
        ICSK_ACK_PUSHED = 4,
        ICSK_ACK_PUSHED2 = 8,
        ICSK_ACK_NOW = 16       /* Send the next ACK immediately (once) */
};
// tcp_input functions for tcp ecn
static void tcp_ecn_queue_cwr(struct tcp_sock *tp)
{
        if (tp->ecn_flags & TCP_ECN_OK)
                tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
}
static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb)
{
        if (tcp_hdr(skb)->cwr) {
                tcp_sk(sk)->ecn_flags &= ~TCP_ECN_DEMAND_CWR;

                /* If the sender is telling us it has entered CWR, then its
                 * cwnd may be very low (even just 1 packet), so we should ACK
                 * immediately.
                 */
                if (TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq)
                        inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
        }
}
static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
{
        tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
}
static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
        case INET_ECN_NOT_ECT:
                /* Funny extension: if ECT is not set on a segment,
                 * and we have already seen ECT on a previous segment,
                 * it is probably a retransmit.
                 */
                if (tp->ecn_flags & TCP_ECN_SEEN) {
                        // tcp_enter_quickack_mode(sk, 2);
                }
                break;
        case INET_ECN_CE:
                // if (tcp_ca_needs_ecn(sk))
                //         tcp_ca_event(sk, CA_EVENT_ECN_IS_CE);

                if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
                        /* Better not delay acks, sender can have a very low cwnd */
                        // tcp_enter_quickack_mode(sk, 2);

                        tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
                }
                tp->ecn_flags |= TCP_ECN_SEEN;
                break;
        default:
                // if (tcp_ca_needs_ecn(sk))
                //         tcp_ca_event(sk, CA_EVENT_ECN_NO_CE);
                tp->ecn_flags |= TCP_ECN_SEEN;
                break;
        }
}
static void tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
{
        if (tcp_sk(sk)->ecn_flags & TCP_ECN_OK)
                __tcp_ecn_check_ce(sk, skb);
}
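/* RFC 3168 handshake, for reference: an ECN-setup SYN carries ECE and CWR
 * both set, while an ECN-setup SYN-ACK carries ECE set with CWR clear. The
 * two helpers below drop TCP_ECN_OK whenever the peer's SYN-ACK (resp. SYN)
 * does not match that pattern, disabling ECN for the connection. */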
static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
{
        if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
                tp->ecn_flags &= ~TCP_ECN_OK;
}
static void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
{
        if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
                tp->ecn_flags &= ~TCP_ECN_OK;
}
static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
{
        if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
                return true;
        return false;
}
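/* A minimal usage sketch, not part of the commit: one hypothetical
 * receive-path routine showing how the helpers above fit together. The
 * function name rx_handle_segment and the call order are illustrative
 * assumptions, not the kernel's actual receive path. */
static void rx_handle_segment(struct sock *sk, const struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);
        const struct tcphdr *th = tcp_hdr(skb);

        if (th->syn) {
                /* Handshake: decide whether ECN stays enabled. */
                tcp_ecn_rcv_syn(tp, th);
        } else {
                /* Data path: track CE marks and the peer's CWR signal. */
                tcp_ecn_check_ce(sk, skb);
                tcp_ecn_accept_cwr(sk, skb);

                /* Peer echoed a congestion signal: queue our own CWR. */
                if (tcp_ecn_rcv_ecn_echo(tp, th))
                        tcp_ecn_queue_cwr(tp);
        }
}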