
/net/ipv4/tcp_cubic.c

http://github.com/mirrors/linux
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * TCP CUBIC: Binary Increase Congestion control for TCP v2.3
  4 * Home page:
  5 *      http://netsrv.csc.ncsu.edu/twiki/bin/view/Main/BIC
  6 * This is from the implementation of CUBIC TCP in
  7 * Sangtae Ha, Injong Rhee and Lisong Xu,
  8 *  "CUBIC: A New TCP-Friendly High-Speed TCP Variant"
  9 *  in ACM SIGOPS Operating System Review, July 2008.
 10 * Available from:
 11 *  http://netsrv.csc.ncsu.edu/export/cubic_a_new_tcp_2008.pdf
 12 *
 13 * CUBIC integrates a new slow start algorithm, called HyStart.
 14 * The details of HyStart are presented in
 15 *  Sangtae Ha and Injong Rhee,
 16 *  "Taming the Elephants: New TCP Slow Start", NCSU TechReport 2008.
 17 * Available from:
 18 *  http://netsrv.csc.ncsu.edu/export/hystart_techreport_2008.pdf
 19 *
 20 * All testing results are available from:
 21 * http://netsrv.csc.ncsu.edu/wiki/index.php/TCP_Testing
 22 *
  23 * Unless CUBIC is enabled and the congestion window is large,
  24 * this behaves the same as the original Reno.
 25 */
 26
 27#include <linux/mm.h>
 28#include <linux/module.h>
 29#include <linux/math64.h>
 30#include <net/tcp.h>
 31
  32#define BICTCP_BETA_SCALE    1024	/* Scale factor for beta calculation:
 33					 * max_cwnd = snd_cwnd * beta
 34					 */
 35#define	BICTCP_HZ		10	/* BIC HZ 2^10 = 1024 */
 36
 37/* Two methods of hybrid slow start */
 38#define HYSTART_ACK_TRAIN	0x1
 39#define HYSTART_DELAY		0x2
 40
 41/* Number of delay samples for detecting the increase of delay */
 42#define HYSTART_MIN_SAMPLES	8
 43#define HYSTART_DELAY_MIN	(4000U)	/* 4 ms */
 44#define HYSTART_DELAY_MAX	(16000U)	/* 16 ms */
 45#define HYSTART_DELAY_THRESH(x)	clamp(x, HYSTART_DELAY_MIN, HYSTART_DELAY_MAX)
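/* Illustrative note (not part of the original source): HYSTART_DELAY_THRESH
 * clamps the growth margin used by the delay detector.  For example, with
 * delay_min = 20000 us, delay_min >> 3 = 2500 us is clamped up to
 * HYSTART_DELAY_MIN = 4000 us, so the delay detector in hystart_update()
 * would exit slow start once the minimum RTT of a round exceeds about 24 ms.
 */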
 46
 47static int fast_convergence __read_mostly = 1;
 48static int beta __read_mostly = 717;	/* = 717/1024 (BICTCP_BETA_SCALE) */
 49static int initial_ssthresh __read_mostly;
 50static int bic_scale __read_mostly = 41;
 51static int tcp_friendliness __read_mostly = 1;
 52
 53static int hystart __read_mostly = 1;
 54static int hystart_detect __read_mostly = HYSTART_ACK_TRAIN | HYSTART_DELAY;
 55static int hystart_low_window __read_mostly = 16;
 56static int hystart_ack_delta_us __read_mostly = 2000;
 57
 58static u32 cube_rtt_scale __read_mostly;
 59static u32 beta_scale __read_mostly;
 60static u64 cube_factor __read_mostly;
 61
 62/* Note parameters that are used for precomputing scale factors are read-only */
 63module_param(fast_convergence, int, 0644);
 64MODULE_PARM_DESC(fast_convergence, "turn on/off fast convergence");
 65module_param(beta, int, 0644);
 66MODULE_PARM_DESC(beta, "beta for multiplicative increase");
 67module_param(initial_ssthresh, int, 0644);
 68MODULE_PARM_DESC(initial_ssthresh, "initial value of slow start threshold");
 69module_param(bic_scale, int, 0444);
 70MODULE_PARM_DESC(bic_scale, "scale (scaled by 1024) value for bic function (bic_scale/1024)");
 71module_param(tcp_friendliness, int, 0644);
 72MODULE_PARM_DESC(tcp_friendliness, "turn on/off tcp friendliness");
 73module_param(hystart, int, 0644);
 74MODULE_PARM_DESC(hystart, "turn on/off hybrid slow start algorithm");
 75module_param(hystart_detect, int, 0644);
 76MODULE_PARM_DESC(hystart_detect, "hybrid slow start detection mechanisms"
 77		 " 1: packet-train 2: delay 3: both packet-train and delay");
 78module_param(hystart_low_window, int, 0644);
 79MODULE_PARM_DESC(hystart_low_window, "lower bound cwnd for hybrid slow start");
 80module_param(hystart_ack_delta_us, int, 0644);
  81MODULE_PARM_DESC(hystart_ack_delta_us, "spacing between ACKs indicating a train (usecs)");
 82
 83/* BIC TCP Parameters */
 84struct bictcp {
  85	u32	cnt;		/* increase cwnd by 1 after this many ACKs */
 86	u32	last_max_cwnd;	/* last maximum snd_cwnd */
 87	u32	last_cwnd;	/* the last snd_cwnd */
 88	u32	last_time;	/* time when updated last_cwnd */
 89	u32	bic_origin_point;/* origin point of bic function */
 90	u32	bic_K;		/* time to origin point
 91				   from the beginning of the current epoch */
 92	u32	delay_min;	/* min delay (usec) */
 93	u32	epoch_start;	/* beginning of an epoch */
 94	u32	ack_cnt;	/* number of acks */
 95	u32	tcp_cwnd;	/* estimated tcp cwnd */
 96	u16	unused;
 97	u8	sample_cnt;	/* number of samples to decide curr_rtt */
 98	u8	found;		/* the exit point is found? */
 99	u32	round_start;	/* beginning of each round */
100	u32	end_seq;	/* end_seq of the round */
101	u32	last_ack;	/* last time when the ACK spacing is close */
102	u32	curr_rtt;	/* the minimum rtt of current round */
103};
104
105static inline void bictcp_reset(struct bictcp *ca)
106{
107	ca->cnt = 0;
108	ca->last_max_cwnd = 0;
109	ca->last_cwnd = 0;
110	ca->last_time = 0;
111	ca->bic_origin_point = 0;
112	ca->bic_K = 0;
113	ca->delay_min = 0;
114	ca->epoch_start = 0;
115	ca->ack_cnt = 0;
116	ca->tcp_cwnd = 0;
117	ca->found = 0;
118}
119
120static inline u32 bictcp_clock_us(const struct sock *sk)
121{
122	return tcp_sk(sk)->tcp_mstamp;
123}
124
125static inline void bictcp_hystart_reset(struct sock *sk)
126{
127	struct tcp_sock *tp = tcp_sk(sk);
128	struct bictcp *ca = inet_csk_ca(sk);
129
130	ca->round_start = ca->last_ack = bictcp_clock_us(sk);
131	ca->end_seq = tp->snd_nxt;
132	ca->curr_rtt = ~0U;
133	ca->sample_cnt = 0;
134}
135
136static void bictcp_init(struct sock *sk)
137{
138	struct bictcp *ca = inet_csk_ca(sk);
139
140	bictcp_reset(ca);
141
142	if (hystart)
143		bictcp_hystart_reset(sk);
144
145	if (!hystart && initial_ssthresh)
146		tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
147}
148
149static void bictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event)
150{
151	if (event == CA_EVENT_TX_START) {
152		struct bictcp *ca = inet_csk_ca(sk);
153		u32 now = tcp_jiffies32;
154		s32 delta;
155
156		delta = now - tcp_sk(sk)->lsndtime;
157
158		/* We were application limited (idle) for a while.
 159		 * Shift epoch_start to keep cwnd growth on the cubic curve.
160		 */
161		if (ca->epoch_start && delta > 0) {
162			ca->epoch_start += delta;
163			if (after(ca->epoch_start, now))
164				ca->epoch_start = now;
165		}
166		return;
167	}
168}
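/* Illustrative note (not part of the original source): if the sender sat
 * application-limited (idle) for, say, 500 ms, delta above is ~500 ms worth
 * of jiffies and epoch_start is pushed forward by that amount, so the elapsed
 * time "t" used by bictcp_update() excludes the idle period and cwnd resumes
 * from the same point on the cubic curve instead of jumping ahead.
 */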
169
170/* calculate the cubic root of x using a table lookup followed by one
171 * Newton-Raphson iteration.
172 * Avg err ~= 0.195%
173 */
174static u32 cubic_root(u64 a)
175{
176	u32 x, b, shift;
177	/*
178	 * cbrt(x) MSB values for x MSB values in [0..63].
179	 * Precomputed then refined by hand - Willy Tarreau
180	 *
181	 * For x in [0..63],
182	 *   v = cbrt(x << 18) - 1
183	 *   cbrt(x) = (v[x] + 10) >> 6
184	 */
185	static const u8 v[] = {
186		/* 0x00 */    0,   54,   54,   54,  118,  118,  118,  118,
187		/* 0x08 */  123,  129,  134,  138,  143,  147,  151,  156,
188		/* 0x10 */  157,  161,  164,  168,  170,  173,  176,  179,
189		/* 0x18 */  181,  185,  187,  190,  192,  194,  197,  199,
190		/* 0x20 */  200,  202,  204,  206,  209,  211,  213,  215,
191		/* 0x28 */  217,  219,  221,  222,  224,  225,  227,  229,
192		/* 0x30 */  231,  232,  234,  236,  237,  239,  240,  242,
193		/* 0x38 */  244,  245,  246,  248,  250,  251,  252,  254,
194	};
195
196	b = fls64(a);
197	if (b < 7) {
198		/* a in [0..63] */
199		return ((u32)v[(u32)a] + 35) >> 6;
200	}
201
202	b = ((b * 84) >> 8) - 1;
203	shift = (a >> (b * 3));
204
205	x = ((u32)(((u32)v[shift] + 10) << b)) >> 6;
206
207	/*
208	 * Newton-Raphson iteration
209	 *                         2
210	 * x    = ( 2 * x  +  a / x  ) / 3
211	 *  k+1          k         k
212	 */
213	x = (2 * x + (u32)div64_u64(a, (u64)x * (u64)(x - 1)));
214	x = ((x * 341) >> 10);
215	return x;
216}
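/* Worked example (illustrative, not part of the original source):
 * cubic_root(1000): fls64(1000) = 10, so b = ((10 * 84) >> 8) - 1 = 2 and
 * shift = 1000 >> 6 = 15.  The table gives v[15] = 156, so the initial
 * estimate is x = ((156 + 10) << 2) >> 6 = 10.  The Newton-Raphson step then
 * computes 2 * 10 + 1000 / (10 * 9) = 31 and (31 * 341) >> 10 = 10, the
 * exact cube root of 1000.
 */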
217
218/*
219 * Compute congestion window to use.
220 */
221static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked)
222{
223	u32 delta, bic_target, max_cnt;
224	u64 offs, t;
225
226	ca->ack_cnt += acked;	/* count the number of ACKed packets */
227
228	if (ca->last_cwnd == cwnd &&
229	    (s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32)
230		return;
231
232	/* The CUBIC function can update ca->cnt at most once per jiffy.
233	 * On all cwnd reduction events, ca->epoch_start is set to 0,
234	 * which will force a recalculation of ca->cnt.
235	 */
236	if (ca->epoch_start && tcp_jiffies32 == ca->last_time)
237		goto tcp_friendliness;
238
239	ca->last_cwnd = cwnd;
240	ca->last_time = tcp_jiffies32;
241
242	if (ca->epoch_start == 0) {
243		ca->epoch_start = tcp_jiffies32;	/* record beginning */
244		ca->ack_cnt = acked;			/* start counting */
 245		ca->tcp_cwnd = cwnd;			/* sync with cubic */
246
247		if (ca->last_max_cwnd <= cwnd) {
248			ca->bic_K = 0;
249			ca->bic_origin_point = cwnd;
250		} else {
251			/* Compute new K based on
252			 * (wmax-cwnd) * (srtt>>3 / HZ) / c * 2^(3*bictcp_HZ)
253			 */
254			ca->bic_K = cubic_root(cube_factor
255					       * (ca->last_max_cwnd - cwnd));
256			ca->bic_origin_point = ca->last_max_cwnd;
257		}
258	}
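	/* Illustrative example (not in the original source): with the default
	 * bic_scale = 41, cube_factor is about 2^40 / 410 ~= 2.7e9.  If
	 * last_max_cwnd - cwnd = 100 packets, bic_K = cubic_root(2.7e11) ~= 6450
	 * in 2^BICTCP_HZ (1/1024 s) units, i.e. roughly 6.3 seconds back to the
	 * origin point, matching K = cbrt((Wmax - cwnd) / C) with C ~= 0.4.
	 */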
259
 260	/* cubic function - calc */
 261	/* Calculate c * time^3 / rtt,
 262	 * taking care of overflow in the calculation of time^3
 263	 * (so time^3 is computed using 64 bit)
 264	 * and without relying on division of 64-bit numbers
 265	 * (so all divisions are done using 32 bit).
 266	 * Also NOTE the units of these variables:
 267	 *	  time = (t - K) / 2^bictcp_HZ
 268	 *	  c    = bic_scale >> 10
 269	 *	  rtt  = (srtt >> 3) / HZ
 270	 * !!! The following code does not have overflow problems
 271	 * as long as cwnd < 1 million packets !!!
 272	 */
273
274	t = (s32)(tcp_jiffies32 - ca->epoch_start);
275	t += usecs_to_jiffies(ca->delay_min);
276	/* change the unit from HZ to bictcp_HZ */
277	t <<= BICTCP_HZ;
278	do_div(t, HZ);
279
280	if (t < ca->bic_K)		/* t - K */
281		offs = ca->bic_K - t;
282	else
283		offs = t - ca->bic_K;
284
285	/* c/rtt * (t-K)^3 */
286	delta = (cube_rtt_scale * offs * offs * offs) >> (10+3*BICTCP_HZ);
287	if (t < ca->bic_K)                            /* below origin*/
288		bic_target = ca->bic_origin_point - delta;
289	else                                          /* above origin*/
290		bic_target = ca->bic_origin_point + delta;
291
292	/* cubic function - calc bictcp_cnt*/
293	if (bic_target > cwnd) {
294		ca->cnt = cwnd / (bic_target - cwnd);
295	} else {
296		ca->cnt = 100 * cwnd;              /* very small increment*/
297	}
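	/* Illustrative example (not in the original source): if cwnd = 100 and
	 * bic_target = 110, cnt = 100 / 10 = 10, i.e. cwnd grows by one packet
	 * per 10 ACKs and closes the 10-packet gap in roughly one RTT; when
	 * bic_target <= cwnd, cnt = 100 * cwnd makes the increase negligible.
	 */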
298
299	/*
300	 * The initial growth of cubic function may be too conservative
301	 * when the available bandwidth is still unknown.
302	 */
303	if (ca->last_max_cwnd == 0 && ca->cnt > 20)
304		ca->cnt = 20;	/* increase cwnd 5% per RTT */
305
306tcp_friendliness:
307	/* TCP Friendly */
308	if (tcp_friendliness) {
309		u32 scale = beta_scale;
310
311		delta = (cwnd * scale) >> 3;
312		while (ca->ack_cnt > delta) {		/* update tcp cwnd */
313			ca->ack_cnt -= delta;
314			ca->tcp_cwnd++;
315		}
316
317		if (ca->tcp_cwnd > cwnd) {	/* if bic is slower than tcp */
318			delta = ca->tcp_cwnd - cwnd;
319			max_cnt = cwnd / delta;
320			if (ca->cnt > max_cnt)
321				ca->cnt = max_cnt;
322		}
323	}
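	/* Illustrative note (not in the original source): with the default
	 * beta = 717, beta_scale works out to 15 (see cubictcp_register()), so
	 * tcp_cwnd gains one packet per cwnd * 15 / 8 ~= 1.9 * cwnd ACKs, which
	 * is roughly the 3 * (1 - b) / (1 + b) per-RTT growth of an AIMD flow
	 * using the same 0.7 decrease factor.
	 */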
324
325	/* The maximum rate of cwnd increase CUBIC allows is 1 packet per
326	 * 2 packets ACKed, meaning cwnd grows at 1.5x per RTT.
327	 */
328	ca->cnt = max(ca->cnt, 2U);
329}
330
331static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
332{
333	struct tcp_sock *tp = tcp_sk(sk);
334	struct bictcp *ca = inet_csk_ca(sk);
335
336	if (!tcp_is_cwnd_limited(sk))
337		return;
338
339	if (tcp_in_slow_start(tp)) {
340		if (hystart && after(ack, ca->end_seq))
341			bictcp_hystart_reset(sk);
342		acked = tcp_slow_start(tp, acked);
343		if (!acked)
344			return;
345	}
346	bictcp_update(ca, tp->snd_cwnd, acked);
347	tcp_cong_avoid_ai(tp, ca->cnt, acked);
348}
349
350static u32 bictcp_recalc_ssthresh(struct sock *sk)
351{
352	const struct tcp_sock *tp = tcp_sk(sk);
353	struct bictcp *ca = inet_csk_ca(sk);
354
355	ca->epoch_start = 0;	/* end of epoch */
356
357	/* Wmax and fast convergence */
358	if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
359		ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
360			/ (2 * BICTCP_BETA_SCALE);
361	else
362		ca->last_max_cwnd = tp->snd_cwnd;
363
364	return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
365}
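/* Worked example (illustrative, not part of the original source): with the
 * default beta = 717 and snd_cwnd = 100, the new ssthresh is
 * 100 * 717 / 1024 = 70 packets.  With fast_convergence on and snd_cwnd still
 * below the previous last_max_cwnd, last_max_cwnd becomes
 * 100 * (1024 + 717) / 2048 = 85 instead of 100, ceding bandwidth to newer
 * flows a little faster.
 */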
366
367static void bictcp_state(struct sock *sk, u8 new_state)
368{
369	if (new_state == TCP_CA_Loss) {
370		bictcp_reset(inet_csk_ca(sk));
371		bictcp_hystart_reset(sk);
372	}
373}
374
375/* Account for TSO/GRO delays.
376 * Otherwise short RTT flows could get too small ssthresh, since during
377 * slow start we begin with small TSO packets and ca->delay_min would
378 * not account for long aggregation delay when TSO packets get bigger.
379 * Ideally even with a very small RTT we would like to have at least one
380 * TSO packet being sent and received by GRO, and another one in qdisc layer.
381 * We apply another 100% factor because @rate is doubled at this point.
382 * We cap the cushion to 1ms.
383 */
384static u32 hystart_ack_delay(struct sock *sk)
385{
386	unsigned long rate;
387
388	rate = READ_ONCE(sk->sk_pacing_rate);
389	if (!rate)
390		return 0;
391	return min_t(u64, USEC_PER_MSEC,
392		     div64_ul((u64)GSO_MAX_SIZE * 4 * USEC_PER_SEC, rate));
393}
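/* Illustrative example (not in the original source, and assuming the usual
 * 64 KB GSO_MAX_SIZE): at a pacing rate of 1.25e8 bytes/sec (~1 Gbit/s),
 * 65536 * 4 * USEC_PER_SEC / rate ~= 2097 us, so the cushion is capped at
 * USEC_PER_MSEC = 1000 us; at ~10 Gbit/s it would be about 210 us.
 */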
394
395static void hystart_update(struct sock *sk, u32 delay)
396{
397	struct tcp_sock *tp = tcp_sk(sk);
398	struct bictcp *ca = inet_csk_ca(sk);
399	u32 threshold;
400
401	if (hystart_detect & HYSTART_ACK_TRAIN) {
402		u32 now = bictcp_clock_us(sk);
403
404		/* first detection parameter - ack-train detection */
405		if ((s32)(now - ca->last_ack) <= hystart_ack_delta_us) {
406			ca->last_ack = now;
407
408			threshold = ca->delay_min + hystart_ack_delay(sk);
409
410			/* Hystart ack train triggers if we get ack past
411			 * ca->delay_min/2.
412			 * Pacing might have delayed packets up to RTT/2
413			 * during slow start.
414			 */
415			if (sk->sk_pacing_status == SK_PACING_NONE)
416				threshold >>= 1;
417
418			if ((s32)(now - ca->round_start) > threshold) {
419				ca->found = 1;
420				pr_debug("hystart_ack_train (%u > %u) delay_min %u (+ ack_delay %u) cwnd %u\n",
421					 now - ca->round_start, threshold,
422					 ca->delay_min, hystart_ack_delay(sk), tp->snd_cwnd);
423				NET_INC_STATS(sock_net(sk),
424					      LINUX_MIB_TCPHYSTARTTRAINDETECT);
425				NET_ADD_STATS(sock_net(sk),
426					      LINUX_MIB_TCPHYSTARTTRAINCWND,
427					      tp->snd_cwnd);
428				tp->snd_ssthresh = tp->snd_cwnd;
429			}
430		}
431	}
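	/* Illustrative example (not in the original source): with
	 * delay_min = 10000 us, no pacing, and a 1000 us ack_delay cushion,
	 * threshold = (10000 + 1000) >> 1 = 5500 us, so once a train of ACKs
	 * spaced within hystart_ack_delta_us stretches more than 5500 us past
	 * round_start, slow start exits and ssthresh is pinned to cwnd.
	 */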
432
433	if (hystart_detect & HYSTART_DELAY) {
 434		/* obtain the minimum delay over the first HYSTART_MIN_SAMPLES packets */
435		if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
436			if (ca->curr_rtt > delay)
437				ca->curr_rtt = delay;
438
439			ca->sample_cnt++;
440		} else {
441			if (ca->curr_rtt > ca->delay_min +
442			    HYSTART_DELAY_THRESH(ca->delay_min >> 3)) {
443				ca->found = 1;
444				NET_INC_STATS(sock_net(sk),
445					      LINUX_MIB_TCPHYSTARTDELAYDETECT);
446				NET_ADD_STATS(sock_net(sk),
447					      LINUX_MIB_TCPHYSTARTDELAYCWND,
448					      tp->snd_cwnd);
449				tp->snd_ssthresh = tp->snd_cwnd;
450			}
451		}
452	}
453}
454
455static void bictcp_acked(struct sock *sk, const struct ack_sample *sample)
456{
457	const struct tcp_sock *tp = tcp_sk(sk);
458	struct bictcp *ca = inet_csk_ca(sk);
459	u32 delay;
460
 461	/* Some calls are for duplicates without timestamps */
462	if (sample->rtt_us < 0)
463		return;
464
465	/* Discard delay samples right after fast recovery */
466	if (ca->epoch_start && (s32)(tcp_jiffies32 - ca->epoch_start) < HZ)
467		return;
468
469	delay = sample->rtt_us;
470	if (delay == 0)
471		delay = 1;
472
473	/* first time call or link delay decreases */
474	if (ca->delay_min == 0 || ca->delay_min > delay)
475		ca->delay_min = delay;
476
477	/* hystart triggers when cwnd is larger than some threshold */
478	if (!ca->found && tcp_in_slow_start(tp) && hystart &&
479	    tp->snd_cwnd >= hystart_low_window)
480		hystart_update(sk, delay);
481}
482
483static struct tcp_congestion_ops cubictcp __read_mostly = {
484	.init		= bictcp_init,
485	.ssthresh	= bictcp_recalc_ssthresh,
486	.cong_avoid	= bictcp_cong_avoid,
487	.set_state	= bictcp_state,
488	.undo_cwnd	= tcp_reno_undo_cwnd,
489	.cwnd_event	= bictcp_cwnd_event,
490	.pkts_acked     = bictcp_acked,
491	.owner		= THIS_MODULE,
492	.name		= "cubic",
493};
494
495static int __init cubictcp_register(void)
496{
497	BUILD_BUG_ON(sizeof(struct bictcp) > ICSK_CA_PRIV_SIZE);
498
499	/* Precompute a bunch of the scaling factors that are used per-packet
500	 * based on SRTT of 100ms
501	 */
502
503	beta_scale = 8*(BICTCP_BETA_SCALE+beta) / 3
504		/ (BICTCP_BETA_SCALE - beta);
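	/* Illustrative arithmetic (not in the original source): with the default
	 * beta = 717, beta_scale = 8 * (1024 + 717) / 3 / (1024 - 717)
	 *                        = 13928 / 3 / 307 = 15 (integer division).
	 */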
505
506	cube_rtt_scale = (bic_scale * 10);	/* 1024*c/rtt */
507
508	/* calculate the "K" for (wmax-cwnd) = c/rtt * K^3
509	 *  so K = cubic_root( (wmax-cwnd)*rtt/c )
510	 * the unit of K is bictcp_HZ=2^10, not HZ
511	 *
512	 *  c = bic_scale >> 10
513	 *  rtt = 100ms
514	 *
515	 * the following code has been designed and tested for
516	 * cwnd < 1 million packets
517	 * RTT < 100 seconds
 518	 * HZ < 100,000,000 (corresponding to a 10 nanosecond jiffy)
519	 */
520
 521	/* 1/c * 2^(3*bictcp_HZ) * srtt */
522	cube_factor = 1ull << (10+3*BICTCP_HZ); /* 2^40 */
523
524	/* divide by bic_scale and by constant Srtt (100ms) */
525	do_div(cube_factor, bic_scale * 10);
526
527	return tcp_register_congestion_control(&cubictcp);
528}
529
530static void __exit cubictcp_unregister(void)
531{
532	tcp_unregister_congestion_control(&cubictcp);
533}
534
535module_init(cubictcp_register);
536module_exit(cubictcp_unregister);
537
538MODULE_AUTHOR("Sangtae Ha, Stephen Hemminger");
539MODULE_LICENSE("GPL");
540MODULE_DESCRIPTION("CUBIC TCP");
541MODULE_VERSION("2.3");