 include/linux/skbuff.h         |   1
 include/linux/tcp.h            |  73
 include/net/tcp.h              |   1
 include/uapi/linux/inet_diag.h |  18
 include/uapi/linux/tcp.h       |   7
 net/core/dev.c                 |   2
 net/core/skbuff.c              |   5
 net/ipv4/Kconfig               |  20
 net/ipv4/Makefile              |   5
 net/ipv4/tcp.c                 |  17
 net/ipv4/tcp_cong.c            |  31
 net/ipv4/tcp_input.c           |  22
 net/ipv4/tcp_mytcp.c           | 553
 net/ipv4/tcp_mytcp.h           |  74
 net/ipv4/tcp_mytcplte.c        | 436
 net/ipv4/tcp_mytcplte.h        |  73
 net/ipv4/tcp_output.c          | 225
 net/ipv4/udp_bpf.c             |   1
 18 files changed, 1545 insertions(+), 19 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 39636fe7e..5642fc287 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3859,6 +3859,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
 			       struct skb_shared_hwtstamps *hwtstamps);
 
 void __skb_tstamp_tx(struct sk_buff *orig_skb,
+		     const struct sk_buff *ack_skb,
 		     struct skb_shared_hwtstamps *hwtstamps,
 		     struct sock *sk, int tstype);
 
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index e62bf28b8..5feb49ee9 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -265,7 +265,8 @@ struct tcp_sock {
 	u32	packets_out;	/* Packets which are "in flight"	*/
 	u32	retrans_out;	/* Retransmitted packets out		*/
 	u32	max_packets_out;  /* max packets_out in last window */
-	u32	cwnd_usage_seq;  /* right edge of cwnd usage tracking flight */
+	u32	cwnd_usage_seq;  /* right edge of cwnd usage tracking flight */ // to be commented out
+	u32	max_packets_seq;  /* right edge of max_packets_out flight */
 
 	u16	urg_data;	/* Saved octet of OOB data and control flags */
 	u8	ecn_flags;	/* ECN status bits.			*/
@@ -415,8 +416,73 @@ struct tcp_sock {
 	 */
 	struct request_sock __rcu *fastopen_rsk;
 	struct saved_syn *saved_syn;
+/* my fast start: added members */
+
+	u64	myfast_bytes_sent_count;
+	u64	myfast_bytes_acked_count;
+	__be32	myfast_start_of_round_seq;
+	__be32	myfast_seq;
+	__be32	myfast_end_of_round_seq;
+	__be16	myfast_source;
+	u64	myfast_time_first_of_round_send;
+	u64	myfast_time_end_of_round_send;
+	u64	myfast_time_last_send;
+	u64	myfast_last_ack_time;
+	u64	myfast_time_used;
+	u64	myfast_send_gap;
+	u64	myfast_send_gap_max;
+	u64	myfast_ack_time_used;
+	u64	myfast_time_wait;
+	u32	myfast_ack_seq;
+	u32	myfast_last_ack_seq;
+	s32	myfast_min_rtt;
+	s32	myfast_rtt;
+	int	myfast_flag;
+	u32	myfast_bdp;
+	u32	myfast_throughput;
+	u32	myfast_ack_throughput;
+	int	myfast_cal_bdp_start_flag;
+	int	myfast_enable_calc;
+	int	myfast_init_wind_flag;
+	int	myfast_new_round_flag;
+	int	myfast_ack_start_flag;
+	int	myfast_send_flag;
+	int	myfast_too_large;
+	int	myfast_calc;
+	int	myfast_reach_thresh;
+
+	int	mytcp_full_pipe_flag;
+	int	mytcp_enable_flag;
+	u32	mytcp_stopped_cwnd;
+	u64	mytcp_wmem_last;
+	u64	mytcp_wmem;
+	u32	mytcp_est_rq;
+	/* new */
+	u32	mytcp_est_queue;
+	u32	mytcp_est_max_queue;
+	u32	mytcp_est_max_queue_real;
+	u32	mytcp_est_queue_last;
+	int	mytcp_est_round_flag;
+	/* end new */
+	int	mytcp_wmem_add_flag;
+	int	mytcp_round_flag;
+	int	mytcp_ece_flag;
+	u32	mytcp_ece_count;
+	u32	mytcp_bytes_send;
+	u32	mytcp_acked_bytes;
+	u32	mytcp_acked_seq_last;
+
+	/* new */
+	u32	my_ip_rtt;
+
+	u32	my_last_ack;
+	u32	my_ack_seq_gap;
+	/* end new */
 };
 
+void calc_myfast_BDP(struct tcp_sock *tp);
+/* end of my fast start added members */
+
 enum tsq_enum {
 	TSQ_THROTTLED,
 	TSQ_QUEUED,
@@ -498,8 +564,11 @@ static inline u32 tcp_saved_syn_len(const struct saved_syn *saved_syn)
 		saved_syn->tcp_hdrlen;
 }
 
 struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
-					       const struct sk_buff *orig_skb);
+					       const struct sk_buff *orig_skb,
+					       const struct sk_buff *ack_skb);
 
 static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss)
 {
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 08e3a2488..7a8f950f5 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1111,6 +1111,7 @@ u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
 void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
 
 u32 tcp_reno_ssthresh(struct sock *sk);
+u32 tcp_mytcp_ssthresh(struct sock *sk);
 u32 tcp_reno_undo_cwnd(struct sock *sk);
 void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
 extern struct tcp_congestion_ops tcp_reno;
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index 20ee93f0f..91c6e7df6 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -155,6 +155,7 @@ enum {
 	INET_DIAG_PAD,
 	INET_DIAG_MARK,		/* only with CAP_NET_ADMIN */
 	INET_DIAG_BBRINFO,	/* request as INET_DIAG_VEGASINFO */
+	INET_DIAG_MYTCPINFO,
 	INET_DIAG_CLASS_ID,	/* request as INET_DIAG_TCLASS */
 	INET_DIAG_MD5SIG,
 	INET_DIAG_ULP_INFO,
@@ -210,6 +211,21 @@ struct tcpvegas_info {
 	__u32	tcpv_minrtt;
 };
 
+struct tcpmytcp_info {
+	__u32	tcpv_enabled;
+	__u32	tcpv_rttcnt;
+	__u32	tcpv_rtt;
+	__u32	tcpv_minrtt;
+};
+
+struct tcpmytcplte_info {
+	__u32	tcpv_enabled;
+	__u32	tcpv_rttcnt;
+	__u32	tcpv_rtt;
+	__u32	tcpv_minrtt;
+};
+
 /* INET_DIAG_DCTCPINFO */
 
 struct tcp_dctcp_info {
@@ -234,6 +250,8 @@ struct tcp_bbr_info {
 union tcp_cc_info {
 	struct tcpvegas_info	vegas;
 	struct tcp_dctcp_info	dctcp;
+	struct tcpmytcp_info	mytcp;
+	struct tcpmytcplte_info	mytcplte;
 	struct tcp_bbr_info	bbr;
 };
 #endif /* _UAPI_INET_DIAG_H_ */
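
For context, a consumer reads these structs back over the sock_diag netlink interface once the kernel fills them in through the .get_info hook (added in tcp_mytcp.c further down). A minimal userspace sketch of interpreting the new attribute payload follows; it assumes the netlink response has already been received and split into attributes (for example with libmnl), that the patched inet_diag.h above is on the include path, and that print_mytcp_info is a hypothetical helper name:

#include <stdio.h>
#include <linux/inet_diag.h>	/* struct tcpmytcp_info, INET_DIAG_MYTCPINFO (patched) */

/* Interpret the payload of an INET_DIAG_MYTCPINFO attribute taken from an
 * already-parsed inet_diag response. */
static void print_mytcp_info(const void *data, unsigned int len)
{
	const struct tcpmytcp_info *info = data;

	if (len < sizeof(*info))
		return;
	printf("mytcp: enabled=%u rttcnt=%u rtt=%u minrtt=%u\n",
	       info->tcpv_enabled, info->tcpv_rttcnt,
	       info->tcpv_rtt, info->tcpv_minrtt);
}
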
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 62db78b9c..856dfcece 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -314,6 +314,7 @@ enum {
 	TCP_NLA_TIMEOUT_REHASH,	/* Timeout-triggered rehash attempts */
 	TCP_NLA_BYTES_NOTSENT,	/* Bytes in write queue not yet sent */
 	TCP_NLA_EDT,		/* Earliest departure time (CLOCK_MONOTONIC) */
+	TCP_NLA_TTL,		/* TTL or hop limit of a packet received */
 };
 
 /* for TCP_MD5SIG socket option */
@@ -343,6 +344,7 @@ struct tcp_diag_md5sig {
 
 /* setsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, ...) */
 
+#define TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT 0x1
 struct tcp_zerocopy_receive {
 	__u64 address;		/* in: address of mapping */
 	__u32 length;		/* in/out: number of bytes to map/mapped */
@@ -351,5 +353,10 @@ struct tcp_zerocopy_receive {
 	__s32 err; /* out: socket error */
 	__u64 copybuf_address;	/* in: copybuf address (small reads) */
 	__s32 copybuf_len; /* in/out: copybuf bytes avail/used or error */
+	__u32 flags; /* in: flags */
+	__u64 msg_control; /* ancillary data */
+	__u64 msg_controllen;
+	__u32 msg_flags;
+	__u32 reserved; /* set to 0 for now */
 };
 #endif /* _UAPI_LINUX_TCP_H */
diff --git a/net/core/dev.c b/net/core/dev.c
index 37bb60a7e..e1906f2f3 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4100,7 +4100,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
 	skb_assert_len(skb);
 
 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
-		__skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
+		__skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED);
 
 	/* Disable soft irqs for various locks below. Also
 	 * stops preemption for RCU.
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2b12e0730..b2c124f22 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4723,6 +4723,7 @@ err:
 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
 
 void __skb_tstamp_tx(struct sk_buff *orig_skb,
+		     const struct sk_buff *ack_skb,
 		     struct skb_shared_hwtstamps *hwtstamps,
 		     struct sock *sk, int tstype)
 {
@@ -4745,7 +4746,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
 	if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
 	    sk->sk_protocol == IPPROTO_TCP &&
 	    sk->sk_type == SOCK_STREAM) {
-		skb = tcp_get_timestamping_opt_stats(sk, orig_skb);
+		skb = tcp_get_timestamping_opt_stats(sk, orig_skb, ack_skb);
 		opt_stats = true;
 	} else
 #endif
@@ -4774,7 +4775,7 @@ EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
 void skb_tstamp_tx(struct sk_buff *orig_skb,
 		   struct skb_shared_hwtstamps *hwtstamps)
 {
-	return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk,
+	return __skb_tstamp_tx(orig_skb, NULL, hwtstamps, orig_skb->sk,
 			       SCM_TSTAMP_SND);
 }
 EXPORT_SYMBOL_GPL(skb_tstamp_tx);
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 23b06063e..52c066199 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -555,6 +555,26 @@ config TCP_CONG_VEGAS
 	adjusts the sending rate by modifying the congestion
 	window. TCP Vegas should provide less packet loss, but it is
 	not as aggressive as TCP Reno.
+
+config TCP_CONG_MYTCP
+	tristate "TCP Mytcp"
+	default y
+	help
+	  TCP mytcp is a sender-side only change to TCP that anticipates
+	  the onset of congestion by estimating the bandwidth. TCP mytcp
+	  adjusts the sending rate by modifying the congestion
+	  window. TCP mytcp should provide less packet loss, but it is
+	  not as aggressive as TCP Reno.
+
+config TCP_CONG_MYTCPLTE
+	tristate "TCP MytcpLTE"
+	default y
+	help
+	  TCP mytcplte is a sender-side only change to TCP that anticipates
+	  the onset of congestion by estimating the bandwidth. TCP mytcplte
+	  adjusts the sending rate by modifying the congestion
+	  window. TCP mytcplte should provide less packet loss, but it is
+	  not as aggressive as TCP Reno.
 
 config TCP_CONG_NV
 	tristate "TCP NV"
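
Both entries default to y, so the algorithms are compiled in by default. For a modular build the usual fragment would look like the sketch below (illustrative). Note that the Makefile hunk that follows also adds tcp_mytcp.o and tcp_mytcplte.o to obj-y unconditionally, so as written the objects are always built in and the tristate has little effect; the fragment assumes that obj-y addition is dropped.

# illustrative .config fragment
CONFIG_TCP_CONG_MYTCP=m
CONFIG_TCP_CONG_MYTCPLTE=m
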
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 5b77a4688..2a1ae0ba6 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -14,7 +14,8 @@ obj-y := route.o inetpeer.o protocol.o \
 		  udp_offload.o arp.o icmp.o devinet.o af_inet.o igmp.o \
 		  fib_frontend.o fib_semantics.o fib_trie.o fib_notifier.o \
 		  inet_fragment.o ping.o ip_tunnel_core.o gre_offload.o \
-		  metrics.o netlink.o nexthop.o udp_tunnel_stub.o
+		  metrics.o netlink.o nexthop.o udp_tunnel_stub.o \
+		  tcp_mytcp.o tcp_mytcplte.o
 
 obj-$(CONFIG_BPFILTER) += bpfilter/
 
@@ -55,6 +56,8 @@ obj-$(CONFIG_TCP_CONG_HSTCP) += tcp_highspeed.o
 obj-$(CONFIG_TCP_CONG_HYBLA) += tcp_hybla.o
 obj-$(CONFIG_TCP_CONG_HTCP) += tcp_htcp.o
 obj-$(CONFIG_TCP_CONG_VEGAS) += tcp_vegas.o
+obj-$(CONFIG_TCP_CONG_MYTCP) += tcp_mytcp.o
+obj-$(CONFIG_TCP_CONG_MYTCPLTE) += tcp_mytcplte.o
 obj-$(CONFIG_TCP_CONG_NV) += tcp_nv.o
 obj-$(CONFIG_TCP_CONG_VENO) += tcp_veno.o
 obj-$(CONFIG_TCP_CONG_SCALABLE) += tcp_scalable.o
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 74edc1252..da99f3340 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3625,8 +3625,20 @@ static size_t tcp_opt_stats_get_size(void)
 		0;
 }
 
+/* Returns TTL or hop limit of an incoming packet from skb. */
+static u8 tcp_skb_ttl_or_hop_limit(const struct sk_buff *skb)
+{
+	if (skb->protocol == htons(ETH_P_IP))
+		return ip_hdr(skb)->ttl;
+	else if (skb->protocol == htons(ETH_P_IPV6))
+		return ipv6_hdr(skb)->hop_limit;
+	else
+		return 0;
+}
+
 struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
-					       const struct sk_buff *orig_skb)
+					       const struct sk_buff *orig_skb,
+					       const struct sk_buff *ack_skb)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *stats;
@@ -3682,6 +3694,9 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
 		    max_t(int, 0, tp->write_seq - tp->snd_nxt));
 	nla_put_u64_64bit(stats, TCP_NLA_EDT, orig_skb->skb_mstamp_ns,
 			  TCP_NLA_PAD);
+	if (ack_skb)
+		nla_put_u8(stats, TCP_NLA_TTL,
+			   tcp_skb_ttl_or_hop_limit(ack_skb));
 
 	return stats;
 }
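
On the userspace side, the new value shows up as a TCP_NLA_TTL attribute inside the SCM_TIMESTAMPING_OPT_STATS control message delivered on the socket error queue. A rough sketch of pulling it out, assuming buf/len are the CMSG_DATA() payload and length of such a control message (opt_stats_ttl is a hypothetical helper):

#include <linux/netlink.h>	/* struct nlattr, NLA_ALIGN, NLA_HDRLEN */
#include <linux/tcp.h>		/* TCP_NLA_TTL (patched) */

/* Return the TTL/hop limit carried in an OPT_STATS blob, or -1 if absent. */
static int opt_stats_ttl(const void *buf, unsigned int len)
{
	const struct nlattr *nla = buf;

	while (len >= NLA_HDRLEN && nla->nla_len >= NLA_HDRLEN &&
	       nla->nla_len <= len) {
		if (nla->nla_type == TCP_NLA_TTL)
			return *((const unsigned char *)nla + NLA_HDRLEN);
		if (NLA_ALIGN(nla->nla_len) >= len)
			break;
		len -= NLA_ALIGN(nla->nla_len);
		nla = (const struct nlattr *)((const char *)nla +
					      NLA_ALIGN(nla->nla_len));
	}
	return -1;
}
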
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index db5831e6c..be24f070c 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -395,10 +395,39 @@ int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
  */
 u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
 {
+	u32 myfast_max_cwnd;
+	u32 myfast_cwnd;
 	u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh);
+	u32 cwnd_full;
+
+	if (tp->mytcp_full_pipe_flag == 1) {
+		/* Pipe is full: hold cwnd and bank the growth slow start
+		 * would have added.
+		 */
+		cwnd_full = tp->snd_cwnd;
+		tp->mytcp_stopped_cwnd += cwnd - cwnd_full;
+		tp->snd_cwnd = min(cwnd_full, tp->snd_cwnd_clamp);
+
+		printk(KERN_DEBUG "full_pipe_flag == 1, the cwnd is %u, the mytcp_stopped_cwnd is %u\n",
+		       tp->snd_cwnd, tp->mytcp_stopped_cwnd);
+	} else {
+		/* Pipe no longer full: release the banked growth in one step. */
+		cwnd_full = min(cwnd + tp->mytcp_stopped_cwnd, tp->snd_ssthresh);
+		tp->snd_cwnd = min(cwnd_full, tp->snd_cwnd_clamp);
+		tp->mytcp_stopped_cwnd = 0;
+	}
 
 	acked -= cwnd - tp->snd_cwnd;
-	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
+
+	/* Never let the window fall below the estimated BDP in slow start. */
+	myfast_max_cwnd = max(tp->myfast_bdp, cwnd);
+	myfast_cwnd = min(myfast_max_cwnd, tp->snd_ssthresh);
+	tp->snd_cwnd = min(myfast_cwnd, tp->snd_cwnd_clamp);
+
+	printk(KERN_DEBUG "in slow start, the cwnd is %u, the ssth is %u\n",
+	       tp->snd_cwnd, tp->snd_ssthresh);
 
 	return acked;
 }
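
The hunk above banks the growth slow start would have applied while mytcp_full_pipe_flag is set (in mytcp_stopped_cwnd) and releases the whole bank in one step once the flag clears, still capped by ssthresh and snd_cwnd_clamp. A standalone model of that bookkeeping, stripped of the kernel types (illustrative only):

struct ss_model {
	unsigned int cwnd, ssthresh, clamp, stopped;
	int full_pipe;
};

static void ss_model_ack(struct ss_model *m, unsigned int acked)
{
	unsigned int cwnd = m->cwnd + acked;

	if (cwnd > m->ssthresh)
		cwnd = m->ssthresh;
	if (m->full_pipe) {
		m->stopped += cwnd - m->cwnd;	/* bank the skipped growth */
	} else {
		cwnd += m->stopped;		/* release the bank at once */
		if (cwnd > m->ssthresh)
			cwnd = m->ssthresh;
		m->stopped = 0;
		m->cwnd = cwnd < m->clamp ? cwnd : m->clamp;
	}
}
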
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 541758cd0..2e878f392 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3183,7 +3183,7 @@ static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
 }
 
 static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
-			   u32 prior_snd_una)
+			   const struct sk_buff *ack_skb, u32 prior_snd_una)
 {
 	const struct skb_shared_info *shinfo;
 
@@ -3195,7 +3195,7 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
 	if (!before(shinfo->tskey, prior_snd_una) &&
 	    before(shinfo->tskey, tcp_sk(sk)->snd_una)) {
 		tcp_skb_tsorted_save(skb) {
-			__skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK);
+			__skb_tstamp_tx(skb, ack_skb, NULL, sk, SCM_TSTAMP_ACK);
 		} tcp_skb_tsorted_restore(skb);
 	}
 }
@@ -3204,8 +3204,8 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
  * is before the ack sequence we can discard it as it's confirmed to have
  * arrived at the other end.
  */
-static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
-			       u32 prior_snd_una,
+static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
+			       u32 prior_fack, u32 prior_snd_una,
 			       struct tcp_sacktag_state *sack, bool ece_ack)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -3294,7 +3294,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 		if (!fully_acked)
 			break;
 
-		tcp_ack_tstamp(sk, skb, prior_snd_una);
+		tcp_ack_tstamp(sk, skb, ack_skb, prior_snd_una);
 
 		next = skb_rb_next(skb);
 		if (unlikely(skb == tp->retransmit_skb_hint))
@@ -3312,7 +3312,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 		tp->snd_up = tp->snd_una;
 
 	if (skb) {
-		tcp_ack_tstamp(sk, skb, prior_snd_una);
+		tcp_ack_tstamp(sk, skb, ack_skb, prior_snd_una);
 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
 			flag |= FLAG_SACK_RENEGING;
 	}
@@ -3746,6 +3746,14 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	sack_state.rate = &rs;
 	sack_state.sack_delivered = 0;
 
+	/* my explicit congestion signal:
+	 * record the ECE (ECN-Echo) bit from the TCP header of the incoming
+	 * ACK in the tcp_sock so the mytcp congestion control algorithm can
+	 * use it.
+	 */
+	tp->mytcp_ece_flag = tcp_hdr(skb)->ece;
+	tp->mytcp_ece_count += tp->mytcp_ece_flag;
+
 	/* We very likely will need to access rtx queue. */
 	prefetch(sk->tcp_rtx_queue.rb_node);
 
@@ -3849,7 +3857,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 		goto no_queue;
 
 	/* See if we can take anything off of the retransmit queue. */
-	flag |= tcp_clean_rtx_queue(sk, prior_fack, prior_snd_una, &sack_state,
+	flag |= tcp_clean_rtx_queue(sk, skb, prior_fack, prior_snd_una, &sack_state,
 				    flag & FLAG_ECE);
 
 	tcp_rack_update_reo_wnd(sk, &rs);
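
The ECE bit recorded above is simply the ECN-Echo flag of the TCP header on the incoming ACK; tcp_hdr(skb) is used rather than a raw skb->data cast so the transport header offset is respected. For reference, the same extraction in a self-contained userspace form (a sketch; count_ece and the captured-header array are hypothetical):

#include <linux/tcp.h>	/* struct tcphdr with the ece bitfield */

/* Count ECE-marked segments in an array of captured TCP headers. */
static unsigned int count_ece(const struct tcphdr *th, unsigned int n)
{
	unsigned int i, ece = 0;

	for (i = 0; i < n; i++)
		ece += th[i].ece;
	return ece;
}
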
diff --git a/net/ipv4/tcp_mytcp.c b/net/ipv4/tcp_mytcp.c
new file mode 100644
index 000000000..6e9bb84e5
--- /dev/null
+++ b/net/ipv4/tcp_mytcp.c
@@ -0,0 +1,553 @@
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/inet_diag.h>
+
+#include <net/tcp.h>
+#include <linux/time.h>
+#include <linux/kernel.h>
+#include "tcp_vegas.h"
+
+#include "tcp_mytcp.h"
+
+static int gamma = 2;
+static int RQ_allow = 40;
+
+module_param(gamma, int, 0644);
+MODULE_PARM_DESC(gamma, "limit on increase (scale by 2)");
+
+static void mytcp_enable(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct mytcp *mytcp = inet_csk_ca(sk);
+
+	/* Begin taking mytcp samples next time we send something. */
+	mytcp->doing_mytcp_now = 1;
+	tp->mytcp_enable_flag = 1;
+
+	/* Set the beginning of the next send window. */
+	mytcp->beg_snd_nxt = 0x7fffffff;
+	mytcp->full_flag_1 = 0;
+	mytcp->full_flag_2 = 0;
+	mytcp->full_flag_3 = 0;
+	mytcp->cong_flag = 0;
+	tp->mytcp_round_flag = 0;
+	mytcp->min_rtt = 1000000;
+	mytcp->time_now = 0;
+}
+
+/* Stop taking mytcp samples for now. */
+static inline void mytcp_disable(struct sock *sk)
+{
+	struct mytcp *mytcp = inet_csk_ca(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	mytcp->doing_mytcp_now = 0;
+	tp->mytcp_enable_flag = 0;
+}
+
+void tcp_mytcp_init(struct sock *sk)
+{
+	struct mytcp *mytcp = inet_csk_ca(sk);
+
+	mytcp->baseRTT = 0x7fffffff;
+	mytcp->max_queue_within_t = 0;
+	mytcp->min_queue_within_t = 1000;
+
+	mytcp_enable(sk);
+}
+EXPORT_SYMBOL_GPL(tcp_mytcp_init);
+
+void tcp_mytcp_pkts_acked(struct sock *sk, const struct ack_sample *sample)
+{
+	struct mytcp *mytcp = inet_csk_ca(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
+	u32 vrtt;
+
+	if (sample->rtt_us < 0)
+		return;
+
+	vrtt = sample->rtt_us + 1;
+	tp->my_ip_rtt = sample->rtt_us;
+
+	printk(KERN_DEBUG "the mytcp RTT is %d\n", sample->rtt_us);
+
+	/* Filter to find propagation delay: */
+	if (vrtt < mytcp->baseRTT)
+		mytcp->baseRTT = vrtt;
+
+	/* Find the min RTT during the last RTT to find
+	 * the current prop. delay + queuing delay:
+	 */
+	if (sample->rtt_us < mytcp->min_rtt)
+		mytcp->min_rtt = sample->rtt_us;
+
+	if (tp->myfast_cal_bdp_start_flag == 1) {
+		if (tp->myfast_min_rtt == 0)
+			tp->myfast_min_rtt = sample->rtt_us;
+		else
+			tp->myfast_min_rtt = min(tp->myfast_min_rtt, sample->rtt_us);
+	}
+}
+EXPORT_SYMBOL_GPL(tcp_mytcp_pkts_acked);
+
+void tcp_mytcp_state(struct sock *sk, u8 ca_state)
+{
+	/* cwnd is left untouched on state changes; recovery handling is
+	 * delegated to the residual-queue logic in cong_avoid.
+	 */
+}
+EXPORT_SYMBOL_GPL(tcp_mytcp_state);
+
+void tcp_mytcp_cwnd_event(struct sock *sk, enum tcp_ca_event event)
+{
+	if (event == CA_EVENT_CWND_RESTART ||
+	    event == CA_EVENT_TX_START)
+		tcp_mytcp_init(sk);
+}
+EXPORT_SYMBOL_GPL(tcp_mytcp_cwnd_event);
+
+/* Control is based on the residual queue rather than on loss, so, much like
+ * BBR, cwnd is not halved on packet loss.  Because queue buildup is bounded,
+ * QCC sees little loss; even if the link capacity suddenly drops and causes
+ * congestion loss, the residual queue (RQ) will be detected and cwnd reduced.
+ */
+u32 tcp_mytcp_ssthresh(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	return tp->snd_cwnd;
+}
+
+static void tcp_mytcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct mytcp *mytcp = inet_csk_ca(sk);
+
+	mytcp->inflight_2pre = mytcp->inflight_1pre;
+	mytcp->inflight_1pre = mytcp->inflight_now;
+	mytcp->inflight_now = tp->packets_out;
+
+	mytcp->cwnd_last = tp->snd_cwnd;
+	mytcp->mytcp_max_driver_queue = tp->mytcp_est_max_queue_real;
+
+	if (mytcp->mytcp_max_driver_queue > mytcp->max_queue_within_t)
+		mytcp->max_queue_within_t = mytcp->mytcp_max_driver_queue;
+
+	mytcp->min_queue_within_t = tp->mytcp_est_rq;
+	mytcp->min_queue = tp->mytcp_est_rq;
+
+	if (mytcp->min_queue_within_t > 0 &&
+	    mytcp->max_queue_within_t > mytcp->min_queue_within_t) {
+		mytcp->one_full_ronund_send = mytcp->max_queue_within_t -
+					      mytcp->min_queue_within_t;
+		mytcp->threshold = (mytcp->one_full_ronund_send * 5) / 100;
+		mytcp->threshold = max_t(u32, mytcp->threshold, RQ_allow);
+	} else {
+		mytcp->threshold = RQ_allow;
+	}
+
+	if (!mytcp->doing_mytcp_now) {
+		tcp_reno_cong_avoid(sk, ack, acked);
+		return;
+	}
+
+	if (tcp_in_slow_start(tp)) {
+		tcp_slow_start(tp, acked);
+		mytcp->cwnd_hold = tp->snd_cwnd;
+	}
+
+	/* With no explicit congestion signal returned, adjust cwnd from the
+	 * driver residual queue following the normal QCC logic.
+	 */
+	if (!tp->mytcp_ece_flag) {
+		/* Like BBR, declare the bandwidth full and leave slow start
+		 * only after the NIC shows above-threshold buildup for three
+		 * consecutive rounds.
+		 */
+		if (tp->mytcp_round_flag >= 1 && tcp_in_slow_start(tp)) {
+			tp->mytcp_round_flag = 0;
+			if (tcp_in_slow_start(tp)) {
+				int flag_count = 0;
+
+				mytcp->full_flag_3 = mytcp->full_flag_2;
+				mytcp->full_flag_2 = mytcp->full_flag_1;
+				mytcp->min_queue_within_t = tp->mytcp_est_rq;
+				mytcp->full_flag_1 = mytcp->min_queue_within_t;
+				if (mytcp->full_flag_1 > mytcp->threshold) {
+					flag_count++;
+					tp->mytcp_full_pipe_flag = 1;
+				} else {
+					tp->mytcp_full_pipe_flag = 0;
+				}
+				if (mytcp->full_flag_2 > mytcp->threshold)
+					flag_count++;
+				if (mytcp->full_flag_3 > mytcp->threshold)
+					flag_count++;
+				/* The pipe counts as full when the residual
+				 * queue exceeds the threshold for three
+				 * straight rounds, or once it exceeds 50 (a
+				 * very large buildup).  50 is an empirical
+				 * value; debugging showed it can vary a fair
+				 * amount with little effect, since congestion
+				 * avoidance can still adjust afterwards.
+				 */
+				if ((flag_count >= 3 && tp->mytcp_est_max_queue_real > 50) ||
+				    (mytcp->full_flag_1 > 80) || (mytcp->loss_cnt > 3)) {
+					mytcp->cwnd_first = tp->snd_cwnd;
+					tp->snd_ssthresh = mytcp->cwnd_first / 2;
+					mytcp->time_last = ktime_get_real_ns();
+					mytcp->beg_snd_nxt = tp->snd_nxt;
+					tp->mytcp_bytes_send = 0;
+					tp->mytcp_acked_seq_last = ack;
+					printk(KERN_DEBUG "in slow start, full, the snd_cwnd is %u, the ssh is %u\n",
+					       mytcp->cwnd_first, tp->snd_ssthresh);
+				}
+			}
+
+			mytcp->max_queue_within_t = 0;
+			tp->mytcp_est_max_queue_real = 0;
+			mytcp->cwnd_hold = tp->snd_cwnd;
+		} else if (tp->mytcp_round_flag >= 1 && !tcp_in_slow_start(tp)) {
+			if (mytcp->max_queue_within_t > tp->mytcp_est_max_queue_real)
+				mytcp->max_queue_now = mytcp->max_queue_within_t;
+			else
+				mytcp->max_queue_now = tp->mytcp_est_max_queue_real;
+
+			tp->mytcp_round_flag = 0;
+			if (mytcp->cong_flag == 3) {
+				tp->snd_cwnd = mytcp->cwnd_first;
+				if (mytcp->max_queue_within_t > tp->mytcp_est_max_queue_real)
+					mytcp->max_queue_first = mytcp->max_queue_within_t;
+				else
+					mytcp->max_queue_first = tp->mytcp_est_max_queue_real;
+				mytcp->queue_now = (mytcp->max_queue_first -
+						    mytcp->min_queue_within_t) + 1;
+				mytcp->min_last = mytcp->min_queue_within_t;
+				mytcp->cong_flag = 99;
+			} else if (mytcp->cong_flag < 3) {
+				tp->snd_cwnd = mytcp->cwnd_first;
+				mytcp->cong_flag++;
+			} else {
+				mytcp->queue_last = mytcp->queue_now;
+				/* If the minimum queue length stays at zero
+				 * for consecutive rounds, grow cwnd by
+				 * RQ_allow each round.
+				 */
+				if (mytcp->min_last == 0 && mytcp->min_queue_within_t == 0)
+					mytcp->cwnd_last = mytcp->cwnd_last + RQ_allow;
+
+				mytcp->min_last = mytcp->min_queue_within_t;
+
+				if (mytcp->min_queue_within_t > 0 &&
+				    mytcp->min_queue_within_t < mytcp->threshold) {
+					/* Residual queue in the sweet spot: hold cwnd. */
+					mytcp->cwnd_first = tp->snd_cwnd;
+					mytcp->max_queue_first = mytcp->max_queue_now;
+				} else {
+					if (mytcp->min_queue_within_t > mytcp->threshold)
+						tp->snd_cwnd = mytcp->cwnd_last -
+							(mytcp->min_queue_within_t - mytcp->threshold);
+					if (mytcp->min_queue_within_t == 0)
+						tp->snd_cwnd = mytcp->cwnd_last + mytcp->threshold;
+				}
+			}
+
+			printk(KERN_DEBUG "into the cong_avoid, tp->snd_cwnd is %u\n",
+			       tp->snd_cwnd);
+			printk(KERN_DEBUG "into the cong_avoid, min_queue_within_t is %u\n",
+			       mytcp->min_queue_within_t);
+
+			mytcp->max_queue_within_t = 0;
+			tp->mytcp_est_max_queue_real = 0;
+			if (tp->snd_cwnd < 10)
+				tp->snd_cwnd = 10;
+			else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
+				tp->snd_cwnd = tp->snd_cwnd_clamp;
+			mytcp->cwnd_hold = tp->snd_cwnd;
+		} else {
+			tp->snd_cwnd = mytcp->cwnd_hold;
+		}
+	}
+
+	/* Once per RTT, handle the case where the bottleneck is the wired
+	 * segment of the path.
+	 */
+	if (after(ack, mytcp->beg_snd_nxt) && !tcp_in_slow_start(tp)) {
+		int packets_in_air;
+		int RQ_wire;
+		int extra_cwnd;
+		int throughput2;
+
+		mytcp->beg_snd_nxt = tp->snd_nxt;
+		mytcp->time_now = ktime_get_real_ns();
+
+		tp->mytcp_acked_bytes = ack - tp->mytcp_acked_seq_last;
+		mytcp->throughput_now = (tp->mytcp_bytes_send * 8) /
+					((mytcp->time_now - mytcp->time_last) / 1000);
+		throughput2 = (tp->mytcp_acked_bytes * 8) /
+			      ((mytcp->time_now - mytcp->time_last) / 1000);
+
+		if (mytcp->throughput_now > 0) {
+			packets_in_air = ((mytcp->throughput_now * 2) *
+					  mytcp->min_rtt) / (tp->mss_cache * 8);
+			RQ_wire = tp->packets_out - mytcp->min_queue - packets_in_air;
+		} else {
+			packets_in_air = 0;
+			RQ_wire = 0;
+		}
+
+		if (RQ_wire < 0)
+			RQ_wire = 0;
+
+		if (RQ_wire > mytcp->threshold)
+			extra_cwnd = RQ_wire - mytcp->threshold;
+		else
+			extra_cwnd = 0;
+
+		tp->snd_cwnd = mytcp->cwnd_last -
+			       max_t(u32, tp->mytcp_ece_count, extra_cwnd);
+
+		if (packets_in_air > 0 && tp->snd_cwnd > (2 * packets_in_air))
+			tp->snd_cwnd = 2 * packets_in_air;
+
+		printk(KERN_DEBUG "the throughput is %u, throughput2 is %d, RQ_wire is %d\n",
+		       mytcp->throughput_now, throughput2, RQ_wire);
+		printk(KERN_DEBUG "wired bottle, the packets out is %u, min queue is %u, packets in air is %d, the RQ wire is %d\n",
+		       tp->packets_out, mytcp->min_queue, packets_in_air, RQ_wire);
+		printk(KERN_DEBUG "wired bottle, into the cong_avoid, the cwnd is %u\n",
+		       tp->snd_cwnd);
+
+		/* Reset the ECE counter and start counting ECE marks for the
+		 * next RTT.
+		 */
+		tp->mytcp_ece_count = 0;
+		tp->mytcp_bytes_send = 0;
+		tp->mytcp_acked_seq_last = ack;
+		mytcp->time_last = mytcp->time_now;
+		mytcp->cwnd_hold = tp->snd_cwnd;
+	} else {
+		tp->snd_cwnd = mytcp->cwnd_hold;
+	}
+}
+
+/* Extract info for TCP socket info provided via netlink. */
+size_t tcp_mytcp_get_info(struct sock *sk, u32 ext, int *attr,
+			  union tcp_cc_info *info)
+{
+	const struct mytcp *ca = inet_csk_ca(sk);
+
+	if (ext & (1 << (INET_DIAG_MYTCPINFO - 1))) {
+		info->mytcp.tcpv_enabled = ca->doing_mytcp_now;
+		info->mytcp.tcpv_rttcnt = ca->cntRTT;
+		info->mytcp.tcpv_rtt = ca->baseRTT;
+		info->mytcp.tcpv_minrtt = ca->min_rtt;
+
+		*attr = INET_DIAG_MYTCPINFO;
+		return sizeof(struct tcpmytcp_info);
+	}
+	return 0;
+}
+
+static struct tcp_congestion_ops tcp_mytcp __read_mostly = {
+	.init		= tcp_mytcp_init,
+	.ssthresh	= tcp_mytcp_ssthresh,
+	.undo_cwnd	= tcp_reno_undo_cwnd,
+	.cong_avoid	= tcp_mytcp_cong_avoid,
+	.pkts_acked	= tcp_mytcp_pkts_acked,
+	/* .set_state	= tcp_mytcp_state, */
+	/* .cwnd_event	= tcp_mytcp_cwnd_event, */
+	.get_info	= tcp_mytcp_get_info,
+
+	.owner		= THIS_MODULE,
+	.name		= "mytcp",
+};
+
+static int __init tcp_mytcp_register(void)
+{
+	BUILD_BUG_ON(sizeof(struct mytcp) > ICSK_CA_PRIV_SIZE);
+	tcp_register_congestion_control(&tcp_mytcp);
+	return 0;
+}
+
+static void __exit tcp_mytcp_unregister(void)
+{
+	tcp_unregister_congestion_control(&tcp_mytcp);
+}
+
+module_init(tcp_mytcp_register);
+module_exit(tcp_mytcp_unregister);
+
+MODULE_AUTHOR("LLG");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TCP mytcp");
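
Once registered, the algorithm can be selected system-wide through the net.ipv4.tcp_congestion_control sysctl or per socket with the standard TCP_CONGESTION socket option. A minimal sketch of the latter (use_mytcp is a hypothetical helper):

#include <netinet/in.h>
#include <netinet/tcp.h>	/* TCP_CONGESTION */
#include <sys/socket.h>

/* Switch an existing TCP socket fd to the mytcp congestion control. */
static int use_mytcp(int fd)
{
	static const char name[] = "mytcp";

	return setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
			  name, sizeof(name) - 1);
}
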
diff --git a/net/ipv4/tcp_mytcp.h b/net/ipv4/tcp_mytcp.h
new file mode 100644
index 000000000..ff70d94a6
--- /dev/null
+++ b/net/ipv4/tcp_mytcp.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * TCP mytcp congestion control interface
+ */
+#ifndef __TCP_mytcp_H
+#define __TCP_mytcp_H 1
+
+/* mytcp variables */
+struct mytcp {
+	u32	beg_snd_nxt;	/* right edge during last RTT */
+	u32	beg_snd_cwnd;	/* saves the size of the cwnd */
+	u8	doing_mytcp_now;/* if true, do mytcp for this RTT */
+	u32	cwnd_last;
+	u32	one_full_ronund_send;
+	u32	threshold;
+	u32	cwnd_hold;
+
+	u16	full_flag_1;
+	u16	full_flag_2;
+	u16	full_flag_3;
+	u16	cong_flag;
+	u32	queue_last;
+	u32	queue_now;
+	u16	cntRTT;
+	int	loss_cnt;
+	u16	throughput_now;
+	u16	max_queue_within_t;
+	u16	min_queue_within_t;
+	u16	mytcp_max_driver_queue;
+	u16	mytcp_min_driver_queue;
+	u16	max_queue_first;
+	u16	max_queue_now;
+	u16	cwnd_first;
+	u16	min_last;
+	u64	time_last;
+	u64	time_now;
+	u16	min_queue;
+	u16	inflight_2pre;
+	u16	inflight_1pre;
+	u16	inflight_now;
+	u32	min_rtt;
+	u32	baseRTT;
+};
+
+void tcp_mytcp_init(struct sock *sk);
+void tcp_mytcp_state(struct sock *sk, u8 ca_state);
+void tcp_mytcp_pkts_acked(struct sock *sk, const struct ack_sample *sample);
+void tcp_mytcp_cwnd_event(struct sock *sk, enum tcp_ca_event event);
+size_t tcp_mytcp_get_info(struct sock *sk, u32 ext, int *attr,
+			  union tcp_cc_info *info);
+
+#endif /* __TCP_mytcp_H */
diff --git a/net/ipv4/tcp_mytcplte.c b/net/ipv4/tcp_mytcplte.c
new file mode 100644
index 000000000..a3bc7fab2
--- /dev/null
+++ b/net/ipv4/tcp_mytcplte.c
@@ -0,0 +1,436 @@
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/inet_diag.h>
+
+#include <net/tcp.h>
+#include <linux/time.h>
+#include <linux/kernel.h>
+#include "tcp_vegas.h"
+
+#include "tcp_mytcplte.h"
+
+static int gamma = 2;
+
+module_param(gamma, int, 0644);
+MODULE_PARM_DESC(gamma, "limit on increase (scale by 2)");
+
+static void mytcplte_enable(struct sock *sk)
+{
+	const struct tcp_sock *tp = tcp_sk(sk);
+	struct mytcplte *mytcplte = inet_csk_ca(sk);
+
+	/* Begin taking mytcplte samples next time we send something. */
+	printk(KERN_DEBUG "enable mytcplte\n");
+	mytcplte->doing_mytcplte_now = 1;
+
+	/* Set the beginning of the next send window. */
+	mytcplte->beg_snd_nxt = tp->snd_nxt;
+	mytcplte->minRTT = 0x7fffffff;
+	mytcplte->full_flag_1 = 0;
+	mytcplte->full_flag_2 = 0;
+	mytcplte->full_flag_3 = 0;
+	mytcplte->cong_flag = 0;
+	mytcplte->inflight_1pre = 0;
+	mytcplte->inflight_2pre = 0;
+	mytcplte->inflight_now = 0;
+	mytcplte->min_rtt = 1000000;
+	mytcplte->mytcplte_round_count = 0;
+	mytcplte->cntRTT = 0;
+	mytcplte->cntRTT_2 = 0;
+	mytcplte->num_i = 0;
+}
+
+/* Stop taking mytcplte samples for now. */
+static inline void mytcplte_disable(struct sock *sk)
+{
+	struct mytcplte *mytcplte = inet_csk_ca(sk);
+
+	mytcplte->doing_mytcplte_now = 0;
+}
+
+void tcp_mytcplte_init(struct sock *sk)
+{
+	struct mytcplte *mytcplte = inet_csk_ca(sk);
+
+	mytcplte->baseRTT = 0x7fffffff;
+	mytcplte->max_queue_within_t = 0;
+	mytcplte->min_queue_within_t = 1000;
+
+	printk(KERN_DEBUG "init mytcplte\n");
+
+	mytcplte_enable(sk);
+}
+EXPORT_SYMBOL_GPL(tcp_mytcplte_init);
+
+void tcp_mytcplte_pkts_acked(struct sock *sk, const struct ack_sample *sample)
+{
+	struct mytcplte *mytcplte = inet_csk_ca(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
+	u32 vrtt;
+
+	if (sample->rtt_us < 0)
+		return;
+
+	if (sample->rtt_us < mytcplte->min_rtt)
+		mytcplte->min_rtt = sample->rtt_us;
+
+	vrtt = sample->rtt_us + 1;
+	mytcplte->cntRTT++;
+	mytcplte->cntRTT_2++;
+
+	printk(KERN_DEBUG "total num sent is %llu\n", tp->bytes_sent);
+	printk(KERN_DEBUG "total num acked is %llu\n", tp->bytes_acked);
+	printk(KERN_DEBUG "snd_una is %u\n", tp->snd_una);
+	printk(KERN_DEBUG "packets inflight are %u\n", tp->packets_out);
+	printk(KERN_DEBUG "the mytcplte RTT is %d\n", sample->rtt_us);
+	printk(KERN_DEBUG "min rtt is %llu\n", mytcplte->min_rtt);
+
+	/* Filter to find propagation delay: */
+	if (vrtt < mytcplte->baseRTT)
+		mytcplte->baseRTT = vrtt;
+
+	/* Find the min RTT during the last RTT to find
+	 * the current prop. delay + queuing delay:
+	 */
+	mytcplte->minRTT = min(mytcplte->minRTT, vrtt);
+}
+EXPORT_SYMBOL_GPL(tcp_mytcplte_pkts_acked);
+
+void tcp_mytcplte_state(struct sock *sk, u8 ca_state)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct mytcplte *mytcplte = inet_csk_ca(sk);
+
+	/* On leaving TCP_CA_Open, fall back to the last known-good window. */
+	if (ca_state != TCP_CA_Open)
+		tp->snd_cwnd = mytcplte->cwnd_first;
+}
+EXPORT_SYMBOL_GPL(tcp_mytcplte_state);
+
+void tcp_mytcplte_cwnd_event(struct sock *sk, enum tcp_ca_event event)
+{
+	if (event == CA_EVENT_CWND_RESTART ||
+	    event == CA_EVENT_TX_START)
+		tcp_mytcplte_init(sk);
+}
+EXPORT_SYMBOL_GPL(tcp_mytcplte_cwnd_event);
+
+static inline u32 tcp_mytcplte_ssthresh(struct tcp_sock *tp)
+{
+	return min(tp->snd_ssthresh, tp->snd_cwnd);
+}
+
+static void tcp_mytcplte_cong_avoid(struct sock *sk, u32 ack, u32 acked)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct mytcplte *mytcplte = inet_csk_ca(sk);
+
+	if (tp->mytcp_round_flag >= 2) {
+		mytcplte->mytcplte_round_count = 2;
+		tp->mytcp_round_flag = 0;
+	}
+
+	mytcplte->cwnd_last = tp->snd_cwnd;
+
+	if (!mytcplte->doing_mytcplte_now) {
+		tcp_reno_cong_avoid(sk, ack, acked);
+		return;
+	}
+
+	if (mytcplte->mytcplte_round_count == 2 && !tcp_in_slow_start(tp)) {
+		mytcplte->mytcplte_round_count = 0;
+		if (mytcplte->cong_flag == 3) {
+			tp->snd_cwnd = mytcplte->cwnd_first;
+			mytcplte->cong_flag = 99;
+		} else if (mytcplte->cong_flag < 3) {
+			tp->snd_cwnd = mytcplte->cwnd_first;
+			mytcplte->cong_flag++;
+		} else {
+			if (tp->mytcp_est_rq > 5) {
+				tp->snd_cwnd = mytcplte->cwnd_last - (tp->mytcp_est_rq - 5);
+				printk(KERN_DEBUG "++++\n");
+			} else if (tp->mytcp_est_rq == 0) {
+				tp->snd_cwnd = mytcplte->cwnd_last + 5;
+				printk(KERN_DEBUG "----\n");
+			}
+		}
+	}
+
+	if (tp->snd_cwnd < 2)
+		tp->snd_cwnd = 2;
+	else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
+		tp->snd_cwnd = tp->snd_cwnd_clamp;
+
+	mytcplte->minRTT = 0x7fffffff;
+	mytcplte->cntRTT_2 = 0;
+
+	if (mytcplte->mytcplte_round_count == 2 && tcp_in_slow_start(tp)) {
+		mytcplte->mytcplte_round_count = 0;
+		if (tcp_in_slow_start(tp)) {
+			int flag_count = 0;
+
+			mytcplte->full_flag_3 = mytcplte->full_flag_2;
+			mytcplte->full_flag_2 = mytcplte->full_flag_1;
+			mytcplte->full_flag_1 = tp->mytcp_est_rq;
+
+			if (mytcplte->full_flag_1 > 10) {
+				mytcplte->time_last = ktime_get_real_ns();
+				flag_count++;
+				if (mytcplte->time_start_flag == 0) {
+					mytcplte->time_last = ktime_get_real_ns();
+					mytcplte->time_start_flag = 1;
+				}
+				tp->mytcp_full_pipe_flag = 1;
+			} else {
+				tp->mytcp_full_pipe_flag = 0;
+			}
+
+			if (mytcplte->full_flag_2 > 10)
+				flag_count++;
+			if (mytcplte->full_flag_3 > 10)
+				flag_count++;
+			printk(KERN_DEBUG "in slow start, the flag1 is %d, the flag2 is %d, the flag3 is %d\n",
+			       mytcplte->full_flag_1, mytcplte->full_flag_2,
+			       mytcplte->full_flag_3);
+			if ((flag_count >= 2) || (tp->mytcp_est_rq > 15)) {
+				mytcplte->cwnd_first = tp->snd_cwnd -
+						       (mytcplte->full_flag_1 - 10);
+				tp->snd_cwnd = mytcplte->cwnd_first;
+				tp->snd_ssthresh = mytcplte->cwnd_first / 2;
+				printk(KERN_DEBUG "in slow start, full, the snd_cwnd is %u\n",
+				       tp->snd_cwnd);
+			}
+		}
+	}
+
+	/* Handling for when the bottleneck is the wired segment of the path. */
+	if (after(ack, mytcplte->beg_snd_nxt) && !tcp_in_slow_start(tp)) {
+		int packets_in_air;
+		int RQ_wire;
+
+		mytcplte->beg_snd_nxt = tp->snd_nxt;
+		mytcplte->time_now = ktime_get_real_ns();
+
+		if ((mytcplte->time_now - mytcplte->time_last) > (100 * 1000)) {
+			mytcplte->throughput_now = tp->bytes_acked * 8 * 1000 /
+						   (mytcplte->time_now - mytcplte->time_last);
+
+			packets_in_air = (mytcplte->throughput_now * mytcplte->min_rtt) /
+					 (1388 * 8);
+			RQ_wire = tp->packets_out - tp->mytcp_est_rq - packets_in_air;
+
+			if (RQ_wire < 0)
+				RQ_wire = 0;
+
+			if (RQ_wire > 10)
+				tp->snd_cwnd = mytcplte->cwnd_last - (RQ_wire - 10);
+
+			printk(KERN_DEBUG "the bytes acked is %llu, time interval is %llu\n",
+			       tp->bytes_acked, mytcplte->time_now - mytcplte->time_last);
+			printk(KERN_DEBUG "the throughput is %u\n", mytcplte->throughput_now);
+			printk(KERN_DEBUG "wired bottle, the packets out is %u, min queue is %u, packets in air is %d, the RQ wire is %d\n",
+			       tp->packets_out, tp->mytcp_est_rq, packets_in_air, RQ_wire);
+			printk(KERN_DEBUG "wired bottle, into the cong_avoid, the cwnd is %u\n",
+			       tp->snd_cwnd);
+		}
+	}
+
+	if (tcp_in_slow_start(tp))
+		tcp_slow_start(tp, acked);
+
+	printk(KERN_DEBUG "into the cong_avoid, the actual cwnd is %u\n",
+	       tp->snd_cwnd);
+}
+
+/* Extract info for TCP socket info provided via netlink. */
+size_t tcp_mytcplte_get_info(struct sock *sk, u32 ext, int *attr,
+			     union tcp_cc_info *info)
+{
+	const struct mytcplte *ca = inet_csk_ca(sk);
+
+	if (ext & (1 << (INET_DIAG_MYTCPINFO - 1))) {
+		info->mytcplte.tcpv_enabled = ca->doing_mytcplte_now;
+		info->mytcplte.tcpv_rttcnt = ca->cntRTT;
+		info->mytcplte.tcpv_rtt = ca->baseRTT;
+		info->mytcplte.tcpv_minrtt = ca->minRTT;
+
+		*attr = INET_DIAG_MYTCPINFO;
+		return sizeof(struct tcpmytcplte_info);
+	}
+	return 0;
+}
+
+static struct tcp_congestion_ops tcp_mytcplte __read_mostly = {
+	.init		= tcp_mytcplte_init,
+	.ssthresh	= tcp_reno_ssthresh,
+	.undo_cwnd	= tcp_reno_undo_cwnd,
+	.cong_avoid	= tcp_mytcplte_cong_avoid,
+	.pkts_acked	= tcp_mytcplte_pkts_acked,
+	.set_state	= tcp_mytcplte_state,
+	.cwnd_event	= tcp_mytcplte_cwnd_event,
+	.get_info	= tcp_mytcplte_get_info,
+
+	.owner		= THIS_MODULE,
+	.name		= "mytcplte",
+};
+
+static int __init tcp_mytcplte_register(void)
+{
+	/* BUILD_BUG_ON(sizeof(struct mytcplte) > ICSK_CA_PRIV_SIZE); */
+	tcp_register_congestion_control(&tcp_mytcplte);
+	return 0;
+}
+
+static void __exit tcp_mytcplte_unregister(void)
+{
+	tcp_unregister_congestion_control(&tcp_mytcplte);
+}
+
+module_init(tcp_mytcplte_register);
+module_exit(tcp_mytcplte_unregister);
+
+MODULE_AUTHOR("LLG");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TCP mytcplte");
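
A sanity check on the units in the wired-bottleneck branch above: bytes_acked * 8 * 1000 / delta_ns yields bits per microsecond, which packets_in_air then multiplies by min_rtt (in microseconds) and divides by a 1388-byte packet in bits. Worked through with illustrative numbers:

/* 1,250,000 bytes acked over 100 ms (1e8 ns):
 *   throughput_now = 1250000 * 8 * 1000 / 100000000 = 100 bits/us (~100 Mbit/s)
 * with min_rtt = 20,000 us:
 *   packets_in_air = 100 * 20000 / (1388 * 8) ~= 180 packets in flight
 */
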
diff --git a/net/ipv4/tcp_mytcplte.h b/net/ipv4/tcp_mytcplte.h
new file mode 100644
index 000000000..0aa2b806a
--- /dev/null
+++ b/net/ipv4/tcp_mytcplte.h
@@ -0,0 +1,73 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * TCP mytcplte congestion control interface
4 */
5#ifndef __TCP_mytcplte_H
6#define __TCP_mytcplte_H 1
7
8//extern int mytcplte_flow_num; //****the num of mytcplte flow*****//
9
10/* mytcplte variables */
11struct mytcplte {
12 u32 beg_snd_nxt; /* right edge during last RTT */
13 u32 beg_snd_cwnd; /* saves the size of the cwnd */
14 u8 doing_mytcplte_now;/* if true, do mytcplte for this RTT */
15// u16 count_i; /* # of RTTs measured within last RTT */
16// u32 RTT_i; /* min of RTTs measured within last RTT (in usec) */
17// u32 RTT_i_last;
18// u32 sum_RTT_i;
19// u32 rtt_of_this_packet;
20// u32 standard_rtt;
21// u32 sum_RTT_i_last;
22 u32 cwnd_last;
23 u16 num_i;
24 u16 cntRTT;
25 u16 cntRTT_2;
26 u16 full_flag_1;
27 u16 full_flag_2;
28 u16 full_flag_3;
29 u16 cong_flag;
30 u64 min_rtt;
31 u64 num_in_pipe;
32// u32 queue_last;
33// u32 queue_now;
34 u16 throughput_now;
35 u16 max_queue_within_t;
36 u16 min_queue_within_t;
37 u16 mytcplte_max_driver_queue;
38 u16 mytcplte_min_driver_queue;
39// u16 max_queue_last;
40 u16 max_queue_first;
41 u16 max_queue_now;
42 u16 cwnd_first;
43 u16 time_start_flag;
44 int min_last;
45 u64 time_last;
46 u64 time_now;
47 u64 time_interval;
48 u16 inflight_2pre;
49 u16 inflight_1pre;
50 u16 inflight_now;
51 int min_inflight;
52 u16 mytcplte_round_count;
53// u16 slow_start_end;
54// u32 slow_start_end_cwnd;
55 u32 minRTT;
56 u32 baseRTT;
57// u16 queue_update;
58// u16 num_i_last;
59// u32 average_RTT_i;
60// u32 average_RTT_i_last;
61// u32 i_interval_stamp; /* the min of all mytcplte RTT measurements seen (in usec) */
62// u16 start_indicator;
63// u16 update;
64};
65
66void tcp_mytcplte_init(struct sock *sk);
67void tcp_mytcplte_state(struct sock *sk, u8 ca_state);
68void tcp_mytcplte_pkts_acked(struct sock *sk, const struct ack_sample *sample);
69void tcp_mytcplte_cwnd_event(struct sock *sk, enum tcp_ca_event event);
70size_t tcp_mytcplte_get_info(struct sock *sk, u32 ext, int *attr,
71 union tcp_cc_info *info);
72
73#endif /* __TCP_MYTCPLTE_H */
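
tcp_mytcplte_register() above leaves its BUILD_BUG_ON commented out, but this struct must still fit in the connection's CA-private scratch area. The shape of that compile-time check can be prototyped in userspace with _Static_assert; the bound below assumes the common definition ICSK_CA_PRIV_SIZE = 13 * sizeof(u64) = 104 bytes and a trimmed stand-in struct, so both should be verified against the target tree:

#include <stdio.h>
#include <stdint.h>

/* assumed value; check include/net/inet_connection_sock.h in the target kernel */
#define ICSK_CA_PRIV_SIZE (13 * sizeof(uint64_t))

/* trimmed stand-in keeping only the widest members of struct mytcplte */
struct mytcplte_demo {
	uint64_t min_rtt, num_in_pipe, time_last, time_now, time_interval;
	uint32_t beg_snd_nxt, beg_snd_cwnd, cwnd_last, minRTT, baseRTT;
	uint16_t cntRTT, throughput_now;
	uint8_t doing_mytcplte_now;
};

_Static_assert(sizeof(struct mytcplte_demo) <= ICSK_CA_PRIV_SIZE,
	       "CA private state must fit in inet_connection_sock");

int main(void)
{
	printf("demo struct: %zu of %zu bytes\n",
	       sizeof(struct mytcplte_demo), ICSK_CA_PRIV_SIZE);
	return 0;
}

The full struct above carries more fields than this stand-in; if its real size exceeds the limit, the kernel-side BUILD_BUG_ON would fire, which may be why it is commented out.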
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index eefd032bc..b94df074a 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1223,6 +1223,147 @@ INDIRECT_CALLABLE_DECLARE(int ip_queue_xmit(struct sock *sk, struct sk_buff *skb
1223INDIRECT_CALLABLE_DECLARE(int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)); 1223INDIRECT_CALLABLE_DECLARE(int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl));
1224INDIRECT_CALLABLE_DECLARE(void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)); 1224INDIRECT_CALLABLE_DECLARE(void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb));
1225 1225
1226//my fast start//
1227static void update_myfast_start_of_round(struct tcp_sock *tp, struct tcphdr *th)
1228{
1229 tp->myfast_start_of_round_seq = th->seq;
1230 tp->myfast_time_first_of_round_send = tcp_clock_us();
1231 return;
1232}
1233
1234void calc_myfast_BDP(struct tcp_sock *tp)
1235{
1236	tp->myfast_bdp = (tp->myfast_throughput * tp->myfast_min_rtt)/(8*tp->mss_cache); /* packets = bits/us * RTT us / (8 * MSS bytes) */
1237 /*
1238 if (tcp_in_slow_start(tp)&&(tp->myfast_init_wind_flag == 0)){
1239 tp->snd_cwnd = tp->myfast_bdp;
1240 tp->snd_wnd = (tp->myfast_bdp * tp->mss_cache);
1241 tp->myfast_init_wind_flag = 1;
1242 }
1243 */
1244 tp->myfast_calc = 1;
1245//	printk(KERN_DEBUG "update myfast bdp, bdp is %u, tp->snd_cwnd is %u, tp->snd_wnd is %u\n",
1246// tp->myfast_bdp, tp->snd_cwnd, tp->snd_wnd);
1247 return;
1248}
1249EXPORT_SYMBOL_GPL(calc_myfast_BDP);
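calc_myfast_BDP() turns the estimated throughput and minimum RTT into a window measured in packets. A worked example with hypothetical inputs, using the same units the myfast fields appear to use (throughput in bits per microsecond, RTT in microseconds):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t throughput = 24;	/* bits per microsecond (~24 Mbit/s) */
	uint32_t min_rtt_us = 50000;	/* 50 ms */
	uint32_t mss = 1448;

	/* packets = bits/us * us / (8 * bytes per packet) */
	uint32_t bdp_packets = (throughput * min_rtt_us) / (8 * mss);
	printf("BDP: %u packets (~%u bytes)\n", bdp_packets, bdp_packets * mss);
	return 0;
}

Here 24 bits/us over a 50 ms minimum RTT gives a BDP of 103 full-size packets.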
1250
1251
1252static int update_myfast_est_link(struct sk_buff *skb,
1253 struct tcp_sock *tp, struct tcphdr *th)
1254{
1255// u64 time_wait;
1256 u64 t1;
1257 u64 t2;
1258
1259
1260 if (th->syn){
1261 tp->myfast_bytes_sent_count = 0;
1262 }
1263 else if(skb->data_len >= tp->mss_cache){
1264
1265 tp->myfast_bytes_sent_count += skb->data_len;
1266 tp->myfast_new_round_flag = 1;
1267
1268
1269 if ((tp->myfast_bytes_sent_count > (3*(tp->mss_cache)/2)) && (!tp->myfast_cal_bdp_start_flag)){
1270
1271// if (tp->myfast_bytes_sent_count == 0){
1272 update_myfast_start_of_round(tp,th);
1273 tp->myfast_cal_bdp_start_flag = 1;
1274 tp->myfast_bytes_sent_count = skb->data_len;
1275 printk(KERN_DEBUG "start, the mss is %u\n", tp->mss_cache);
1276
1277 }
1278 else if ((tp->myfast_ack_seq > ntohl(tp->myfast_start_of_round_seq)) && tp->myfast_cal_bdp_start_flag){
1279
1280 t1 = tp->myfast_time_end_of_round_send - tp->myfast_time_first_of_round_send;
1281
1282 if(tp->myfast_min_rtt > t1){
1283			tp->myfast_time_wait = tp->myfast_min_rtt - t1;
1284 }
1285
1286 else{
1287 tp->myfast_time_wait = 0;
1288 }
1289
1290 t2 = (tcp_clock_us() - tp->myfast_time_end_of_round_send);
1291
1292 if(t2 > tp->myfast_time_wait){
1293 tp->myfast_time_used += (t2 - tp->myfast_time_wait);
1294 }
1295
1296// printk(KERN_DEBUG "t1 is %llu, t2 is %llu, t_wait is %llu\n", t1, t2, tp->myfast_time_wait);
1297
1298 update_myfast_start_of_round(tp,th);
1299
1300			tp->myfast_end_of_round_seq = tp->myfast_seq; /* close the round at the last seq sent */
1301 tp->myfast_flag = 1;
1302
1303// printk(KERN_DEBUG "a new round\n");
1304
1305 tp->myfast_enable_calc = 1;
1306 tp->myfast_init_wind_flag ++;
1307
1308 }
1309
1310 else if (tp->myfast_cal_bdp_start_flag){
1311
1312 if(tp->myfast_time_end_of_round_send == 0 || tp->myfast_flag == 1){
1313 tp->myfast_time_last_send = tp->myfast_time_first_of_round_send;
1314 tp->myfast_flag = 0;
1315// printk(KERN_DEBUG "in send middle of round, if\n");
1316 }
1317 else{
1318 tp->myfast_time_last_send = tp->myfast_time_end_of_round_send;
1319// printk(KERN_DEBUG "in send middle of round, else\n");
1320 }
1321
1322			tp->myfast_seq = th->seq; /* remember the last seq sent this round */
1323 tp->myfast_time_end_of_round_send = tcp_clock_us();
1324// tp->myfast_time_used += (tp->myfast_time_end_of_round_send - tp->myfast_time_last_send);
1325
1326// handle overly large send gaps caused by out-of-order transmission
1327 tp->myfast_send_gap = tp->myfast_time_end_of_round_send - tp->myfast_time_last_send;
1328 if (tp->myfast_send_gap < (tp->myfast_min_rtt/2)){
1329 if (tp->myfast_send_gap > tp->myfast_send_gap_max){
1330 tp->myfast_send_gap_max = tp->myfast_send_gap;
1331 }
1332 tp->myfast_time_used += tp->myfast_send_gap;
1333// printk(KERN_DEBUG "in send middle of round, if, myfast_time_used is %llu\n", tp->myfast_time_used);
1334 }
1335 else{
1336 tp->myfast_time_used += tp->myfast_send_gap_max;
1337// printk(KERN_DEBUG "in send middle of round, else, myfast_time_used is %llu\n", tp->myfast_time_used);
1338 }
1339
1340
1341// printk(KERN_DEBUG "in send middle of round, myfast_time_last_send is %llu\n", tp->myfast_time_last_send);
1342
1343 }
1344
1345// tp->myfast_bytes_sent_count += skb->data_len;
1346/*
1347 if(tp->myfast_enable_calc){
1348 tp->myfast_throughput = (tp->myfast_bytes_sent_count * 8) / tp->myfast_time_used;
1349 printk(KERN_DEBUG "throughput is %u\n", tp->myfast_throughput);
1350 calc_myfast_BDP(tp);
1351 }
1352*/
1353
1354
1355// printk(KERN_DEBUG "myfast_time_first_of_round_send is %llu\n", tp->myfast_time_first_of_round_send);
1356// printk(KERN_DEBUG "myfast_time_end_of_round_send is %llu\n", tp->myfast_time_end_of_round_send);
1357// printk(KERN_DEBUG "myfast_ack_seq is %u\n", tp->myfast_ack_seq);
1358// printk(KERN_DEBUG "myfast_start_of_round_seq is %u\n", ntohl(tp->myfast_start_of_round_seq));
1359// printk(KERN_DEBUG "myfast_time_used is %llu\n", tp->myfast_time_used);
1360
1361
1362 }
1363 return 0;
1364}
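
The heart of update_myfast_est_link() is its effective-send-time accounting: idle time up to one minimum RTT per round is excluded, inter-send gaps shorter than half the minimum RTT are charged in full, and larger gaps are treated as reordering artifacts and charged as the largest normal gap seen so far. That bookkeeping can be sketched in userspace with hypothetical timestamps:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t min_rtt = 40000;			/* 40 ms */
	uint64_t sends[] = { 0, 300, 900, 60000, 60400 };/* tx times, us */
	uint64_t time_used = 0, gap_max = 0;
	int i;

	for (i = 1; i < 5; i++) {
		uint64_t gap = sends[i] - sends[i - 1];

		if (gap < min_rtt / 2) {
			if (gap > gap_max)
				gap_max = gap;
			time_used += gap;	/* normal gap: charge in full */
		} else {
			time_used += gap_max;	/* outlier: charge max seen */
		}
	}

	/* throughput in bits per microsecond, as in the myfast fields */
	uint64_t bytes_sent = 4 * 1448;
	printf("time_used %llu us, throughput %llu bits/us\n",
	       (unsigned long long)time_used,
	       (unsigned long long)(bytes_sent * 8 / time_used));
	return 0;
}

The 59.1 ms outlier gap is charged as the 600 us maximum seen earlier, so the four gaps sum to 1900 us of effective send time, giving roughly 24 bits/us for four 1448-byte segments.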
1365
1366//my fast start//
1226/* This routine actually transmits TCP packets queued in by 1367/* This routine actually transmits TCP packets queued in by
1227 * tcp_do_sendmsg(). This is used by both the initial 1368 * tcp_do_sendmsg(). This is used by both the initial
1228 * transmission and possible later retransmissions. 1369 * transmission and possible later retransmissions.
@@ -1234,6 +1375,10 @@ INDIRECT_CALLABLE_DECLARE(void tcp_v4_send_check(struct sock *sk, struct sk_buff
1234 * We are working here with either a clone of the original 1375 * We are working here with either a clone of the original
1235 * SKB, or a fresh unique copy made by the retransmit engine. 1376 * SKB, or a fresh unique copy made by the retransmit engine.
1236 */ 1377 */
1378unsigned long myfast_driver_queue_min;
1379unsigned long myfast_round_flag;
1380EXPORT_SYMBOL_GPL(myfast_driver_queue_min);
1381EXPORT_SYMBOL_GPL(myfast_round_flag);
1237static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, 1382static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
1238 int clone_it, gfp_t gfp_mask, u32 rcv_nxt) 1383 int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
1239{ 1384{
@@ -1318,9 +1463,12 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
1318 skb_orphan(skb); 1463 skb_orphan(skb);
1319 skb->sk = sk; 1464 skb->sk = sk;
1320 skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree; 1465 skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
1321 skb_set_hash_from_sk(skb, sk);
1322 refcount_add(skb->truesize, &sk->sk_wmem_alloc); 1466 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
1323 1467
1468 tp->mytcp_wmem_last = refcount_read(&sk->sk_wmem_alloc);
1469 tp->mytcp_wmem_add_flag = 1;
1470
1471
1324 skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm); 1472 skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
1325 1473
1326 /* Build TCP header and checksum it. */ 1474 /* Build TCP header and checksum it. */
@@ -1346,7 +1494,6 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
1346 } 1494 }
1347 } 1495 }
1348 1496
1349 tcp_options_write((__be32 *)(th + 1), tp, &opts);
1350 skb_shinfo(skb)->gso_type = sk->sk_gso_type; 1497 skb_shinfo(skb)->gso_type = sk->sk_gso_type;
1351 if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) { 1498 if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) {
1352 th->window = htons(tcp_select_window(sk)); 1499 th->window = htons(tcp_select_window(sk));
@@ -1357,6 +1504,9 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
1357 */ 1504 */
1358 th->window = htons(min(tp->rcv_wnd, 65535U)); 1505 th->window = htons(min(tp->rcv_wnd, 65535U));
1359 } 1506 }
1507
1508 tcp_options_write((__be32 *)(th + 1), tp, &opts);
1509
1360#ifdef CONFIG_TCP_MD5SIG 1510#ifdef CONFIG_TCP_MD5SIG
1361 /* Calculate the MD5 hash, as we have all we need now */ 1511 /* Calculate the MD5 hash, as we have all we need now */
1362 if (md5) { 1512 if (md5) {
@@ -1387,6 +1537,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
1387 tcp_skb_pcount(skb)); 1537 tcp_skb_pcount(skb));
1388 1538
1389 tp->segs_out += tcp_skb_pcount(skb); 1539 tp->segs_out += tcp_skb_pcount(skb);
1540 skb_set_hash_from_sk(skb, sk);
1390 /* OK, its time to fill skb_shinfo(skb)->gso_{segs|size} */ 1541 /* OK, its time to fill skb_shinfo(skb)->gso_{segs|size} */
1391 skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb); 1542 skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
1392 skb_shinfo(skb)->gso_size = tcp_skb_mss(skb); 1543 skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);
@@ -1403,6 +1554,51 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
1403 inet6_csk_xmit, ip_queue_xmit, 1554 inet6_csk_xmit, ip_queue_xmit,
1404 sk, skb, &inet->cork.fl); 1555 sk, skb, &inet->cork.fl);
1405 1556
1557
1558 tp->myfast_source = ntohs(th->source);
1559 // my fast start //
1560
1561
1562 if (tcp_in_slow_start(tp)){
1563 update_myfast_est_link(skb,tp,th);
1564 }
1565
1566
1567 tp->mytcp_bytes_send += skb->data_len;
1568 printk(KERN_DEBUG "the mytcp_bytes_send is %u\n", tp->mytcp_bytes_send);
1569
1570
1571
1572 if(tp->myfast_calc && (myfast_round_flag ==1) && (tcp_in_slow_start(tp)) && (tp->mytcp_enable_flag == 1)){
1573 if ((myfast_driver_queue_min > 20) && (tp->myfast_reach_thresh != 1)){
1574 tp->myfast_too_large = 1;
1575 tp->myfast_reach_thresh = 1;
1576 tp->snd_cwnd = tp->snd_cwnd/2;
1577 tp->myfast_bdp = tp->myfast_bdp/2;
1578 }
1579
1580// else if((myfast_driver_queue_min == 0)){
1581// tp->myfast_too_large = 0;
1582// tp->snd_cwnd = tp->snd_cwnd + 20;
1583// }
1584
1585// printk(KERN_DEBUG "myfast_too_large is %u\n", tp->myfast_too_large);
1586// printk(KERN_DEBUG "snd_cwnd is %u, myfast_bdp is %u\n", tp->snd_cwnd, tp->myfast_bdp);
1587 }
1588
1589
1590
1591
1592
1593// printk(KERN_DEBUG "the cwnd is %u\n", tp->snd_cwnd);
1594// printk(KERN_DEBUG "the myfast_driver_queue_min is %lu\n", myfast_driver_queue_min);
1595// printk(KERN_DEBUG "the min RTT is %u\n", tp->myfast_min_rtt);
1596// printk(KERN_DEBUG "the packets out is %u\n", tp->packets_out);
1597	printk(KERN_DEBUG "tcp_output.c: TCP sent a segment, source port %d, dest port %d, cwnd %u, data len %u\n",
1598		ntohs(th->source), ntohs(th->dest), tp->snd_cwnd, skb->data_len);
1599
1600
1601
1406 if (unlikely(err > 0)) { 1602 if (unlikely(err > 0)) {
1407 tcp_enter_cwr(sk); 1603 tcp_enter_cwr(sk);
1408 err = net_xmit_eval(err); 1604 err = net_xmit_eval(err);
@@ -2499,13 +2695,34 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
2499{ 2695{
2500 unsigned long limit; 2696 unsigned long limit;
2501 2697
2698//myfast
2699 unsigned long limit_faststart;
2700// unsigned long limit_tsq;
2701 struct tcp_sock *tp = tcp_sk(sk);
2702
2502 limit = max_t(unsigned long, 2703 limit = max_t(unsigned long,
2503 2 * skb->truesize, 2704 2 * skb->truesize,
2504 sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift)); 2705 sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift));
2505 if (sk->sk_pacing_status == SK_PACING_NONE) 2706 if (sk->sk_pacing_status == SK_PACING_NONE)
2506 limit = min_t(unsigned long, limit, 2707 limit = min_t(unsigned long, limit,
2507 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes)); 2708 sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
2508 limit <<= factor; 2709 limit <<= factor;
2710
2711//mytcp
2712 tp->mytcp_wmem = refcount_read(&sk->sk_wmem_alloc);
2713 if ((tp->mytcp_wmem_add_flag == 1) && (tp->mytcp_wmem_last > tp->mytcp_wmem)){
2714 tp->mytcp_round_flag++;
2715 tp->mytcp_wmem_add_flag = 0;
2716		tp->mytcp_est_rq = (tp->mytcp_wmem / 2304); /* estimate queued packets, assuming ~2304 B truesize each */
2717 printk(KERN_DEBUG "tcp_small_queue_check, the tp->mytcp_wmem is %ld, tp->mytcp_est_rq is %ld\n", tp->mytcp_wmem, tp->mytcp_est_rq);
2718 }
2719//mytcp
2720
2721	if (limit < 43800){ /* floor: 30 * 1460-byte segments */
2722 limit = 43800;
2723 }
2724
2725// printk(KERN_DEBUG "the limit is %ld\n", limit);
2509 2726
2510 if (static_branch_unlikely(&tcp_tx_delay_enabled) && 2727 if (static_branch_unlikely(&tcp_tx_delay_enabled) &&
2511 tcp_sk(sk)->tcp_tx_delay) { 2728 tcp_sk(sk)->tcp_tx_delay) {
@@ -2517,7 +2734,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
2517 * do_div(extra_bytes, USEC_PER_SEC/2) is replaced by a right shift. 2734 * do_div(extra_bytes, USEC_PER_SEC/2) is replaced by a right shift.
2518 */ 2735 */
2519 extra_bytes >>= (20 - 1); 2736 extra_bytes >>= (20 - 1);
2520 limit += extra_bytes; 2737 limit += extra_bytes; //myfast
2521 } 2738 }
2522 if (refcount_read(&sk->sk_wmem_alloc) > limit) { 2739 if (refcount_read(&sk->sk_wmem_alloc) > limit) {
2523 /* Always send skb if rtx queue is empty. 2740 /* Always send skb if rtx queue is empty.
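
The 43800-byte floor added to tcp_small_queue_check() equals 30 maximum-size segments at a 1460-byte MSS, so the myfast probing keeps a minimum burst budget even when the pacing-derived limit is small. A simplified sketch of the limit computation (sample inputs; the sysctl clamp and the tcp_tx_delay extra bytes are omitted):

#include <stdio.h>

#define MYFAST_LIMIT_FLOOR 43800UL	/* = 30 * 1460-byte segments */

/* mirrors the small-queue limit above: the larger of 2*truesize and the
 * pacing-rate allowance, scaled by factor, then floored for myfast */
static unsigned long small_queue_limit(unsigned long truesize,
				       unsigned long pacing_rate,
				       int pacing_shift, int factor)
{
	unsigned long limit = 2 * truesize;
	unsigned long by_rate = pacing_rate >> pacing_shift;

	if (by_rate > limit)
		limit = by_rate;
	limit <<= factor;
	if (limit < MYFAST_LIMIT_FLOOR)
		limit = MYFAST_LIMIT_FLOOR;
	return limit;
}

int main(void)
{
	/* 4 KiB truesize, 2.5 MB/s pacing rate, default shift of 10 */
	printf("limit = %lu bytes\n",
	       small_queue_limit(4096, 2500000, 10, 0));
	return 0;
}

With these sample numbers both candidate limits fall well below the floor, so the function returns 43800.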
diff --git a/net/ipv4/udp_bpf.c b/net/ipv4/udp_bpf.c
index 69c9663f9..487d74126 100644
--- a/net/ipv4/udp_bpf.c
+++ b/net/ipv4/udp_bpf.c
@@ -4,6 +4,7 @@
4#include <linux/skmsg.h> 4#include <linux/skmsg.h>
5#include <net/sock.h> 5#include <net/sock.h>
6#include <net/udp.h> 6#include <net/udp.h>
7#include <linux/bpf.h>
7 8
8enum { 9enum {
9 UDP_BPF_IPV4, 10 UDP_BPF_IPV4,