Cubic (#1074)
ddragana authored Jan 18, 2021
1 parent ed6c7b2 commit 92233a1
Showing 9 changed files with 1,018 additions and 243 deletions.
563 changes: 336 additions & 227 deletions neqo-transport/src/cc/classic_cc.rs

Large diffs are not rendered by default.
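
The classic_cc.rs diff is not rendered here, but the `impl WindowAdjustment for Cubic` and `impl WindowAdjustment for NewReno` blocks further down make the shape of the trait that classic_cc.rs defines reasonably clear. The sketch below is inferred from those impl blocks only; the supertraits, doc comments, and any further items in the real classic_cc.rs are assumptions and may differ.

use std::fmt::{Debug, Display};
use std::time::{Duration, Instant};

pub trait WindowAdjustment: Display + Debug {
    /// Number of acknowledged bytes required before the congestion window is
    /// allowed to grow by one `MAX_DATAGRAM_SIZE`.
    fn bytes_for_cwnd_increase(
        &mut self,
        curr_cwnd: usize,
        new_acked_bytes: usize,
        min_rtt: Duration,
        now: Instant,
    ) -> usize;
    /// Returns the reduced congestion window and acked-bytes count after a
    /// congestion event.
    fn reduce_cwnd(&mut self, curr_cwnd: usize, acked_bytes: usize) -> (usize, usize);
    /// Called when the connection becomes application-limited.
    fn on_app_limited(&mut self);
    #[cfg(test)]
    fn last_max_cwnd(&self) -> f64;
    #[cfg(test)]
    fn set_last_max_cwnd(&mut self, last_max_cwnd: f64);
}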

203 changes: 203 additions & 0 deletions neqo-transport/src/cc/cubic.rs
@@ -0,0 +1,203 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![deny(clippy::pedantic)]

use std::fmt::{self, Display};
use std::time::{Duration, Instant};

use crate::cc::{classic_cc::WindowAdjustment, MAX_DATAGRAM_SIZE_F64};
use neqo_common::qtrace;
use std::convert::TryFrom;

// CUBIC congestion control (RFC 8312)

// C is a constant fixed to determine the aggressiveness of window
// increase in high-BDP networks.
pub const CUBIC_C: f64 = 0.4;
// CUBIC_ALPHA = 3 * (1 - beta_cubic) / (1 + beta_cubic), with beta_cubic = 0.7;
// this is the TCP-friendly region factor from Section 4.2 of RFC 8312.
pub const CUBIC_ALPHA: f64 = 3.0 * (1.0 - 0.7) / (1.0 + 0.7);

// CUBIC_BETA = 0.7, expressed as a usize quotient/divisor pair so that the
// multiplicative decrease can be applied with integer arithmetic.
pub const CUBIC_BETA_USIZE_QUOTIENT: usize = 7;
pub const CUBIC_BETA_USIZE_DIVISOR: usize = 10;

/// The fast convergence ratio further reduces the remembered window maximum
/// (`W_max`) when a congestion event occurs before the previous `W_max` has
/// been reached again.
pub const CUBIC_FAST_CONVERGENCE: f64 = 0.85; // (1.0 + CUBIC_BETA) / 2.0;

/// The minimum number of multiples of the datagram size that need
/// to be acknowledged to cause an increase in the congestion window.
/// When there is no loss, Cubic can return to exponential growth, but
/// this value reduces the magnitude of the resulting growth by a constant factor.
/// A value of 1.0 would mean a return to the rate used in slow start.
const EXPONENTIAL_GROWTH_REDUCTION: f64 = 2.0;

/// Convert a `usize` into an `f64` without a lossy `as` cast: split the value
/// into a high part (everything above bit 21) and a low 21-bit part, both of
/// which convert losslessly through `u32`.
fn convert_to_f64(v: usize) -> f64 {
assert!(v < (1 << 53));
let mut f_64 = f64::try_from(u32::try_from(v >> 21).unwrap()).unwrap();
f_64 *= 2_097_152.0; // f_64 <<= 21
f_64 += f64::try_from(u32::try_from(v & 0x1f_ffff).unwrap()).unwrap();
f_64
}

#[derive(Debug)]
pub struct Cubic {
last_max_cwnd: f64,
estimated_tcp_cwnd: f64,
k: f64,
w_max: f64,
ca_epoch_start: Option<Instant>,
last_phase_was_tcp: bool,
tcp_acked_bytes: f64,
}

impl Default for Cubic {
fn default() -> Self {
Self {
last_max_cwnd: 0.0,
estimated_tcp_cwnd: 0.0,
k: 0.0,
w_max: 0.0,
ca_epoch_start: None,
last_phase_was_tcp: false,
tcp_acked_bytes: 0.0,
}
}
}

impl Display for Cubic {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"Cubic [last_max_cwnd: {}, k: {}, w_max: {}, ca_epoch_start: {:?}]",
self.last_max_cwnd, self.k, self.w_max, self.ca_epoch_start
)?;
Ok(())
}
}

#[allow(clippy::doc_markdown)]
impl Cubic {
/// The original equation is:
/// K = cubic_root(W_max*(1-beta_cubic)/C) (Eq. 2 of RFC 8312)
/// where W_max is expressed in segments of the maximum segment size (MSS).
///
/// K is the time that W_cubic(t) = C*(t-K)^3 + W_max (Eq. 1) would take to
/// grow back to W_max. We use bytes rather than MSS units, so the equation
/// becomes: W_cubic(t) = C*MSS*(t-K)^3 + W_max.
///
/// With W_cubic(0) set to the congestion window at the start of the epoch
/// (`curr_cwnd`), we can solve that equation for K:
/// K = cubic_root((W_max - curr_cwnd) / C / MSS)
fn calc_k(&self, curr_cwnd: f64) -> f64 {
((self.w_max - curr_cwnd) / CUBIC_C / MAX_DATAGRAM_SIZE_F64).cbrt()
}

/// W_cubic(t) = C*MSS*(t-K)^3 + W_max (Eq. 1 of RFC 8312, converted to bytes)
/// t is the time since the start of the congestion avoidance epoch, in seconds.
fn w_cubic(&self, t: f64) -> f64 {
CUBIC_C * (t - self.k).powi(3) * MAX_DATAGRAM_SIZE_F64 + self.w_max
}

fn start_epoch(&mut self, curr_cwnd_f64: f64, new_acked_f64: f64, now: Instant) {
self.ca_epoch_start = Some(now);
// reset tcp_acked_bytes and estimated_tcp_cwnd;
self.tcp_acked_bytes = new_acked_f64;
self.estimated_tcp_cwnd = curr_cwnd_f64;
if self.last_max_cwnd <= curr_cwnd_f64 {
self.w_max = curr_cwnd_f64;
self.k = 0.0;
} else {
self.w_max = self.last_max_cwnd;
self.k = self.calc_k(curr_cwnd_f64);
}
qtrace!([self], "New epoch");
}
}

impl WindowAdjustment for Cubic {
// This is because of the cast in the last line from f64 to usize.
#[allow(clippy::cast_possible_truncation)]
#[allow(clippy::cast_sign_loss)]
fn bytes_for_cwnd_increase(
&mut self,
curr_cwnd: usize,
new_acked_bytes: usize,
min_rtt: Duration,
now: Instant,
) -> usize {
let curr_cwnd_f64 = convert_to_f64(curr_cwnd);
let new_acked_f64 = convert_to_f64(new_acked_bytes);
if self.ca_epoch_start.is_none() {
// This is the start of a new congestion avoidance phase.
self.start_epoch(curr_cwnd_f64, new_acked_f64, now);
} else {
self.tcp_acked_bytes += new_acked_f64;
}

// `t` for W_cubic(t + RTT) (Section 4.1 of RFC 8312): the time since the
// congestion avoidance epoch started, advanced by one `min_rtt`.
let time_ca = self
.ca_epoch_start
.map_or(min_rtt, |t| now + min_rtt - t)
.as_secs_f64();
let target_cubic = self.w_cubic(time_ca);

// TCP-friendly region (Section 4.2 of RFC 8312): estimate the window a
// Reno-style flow would have by growing it one datagram for every
// `estimated_tcp_cwnd / CUBIC_ALPHA` bytes acknowledged.
let tcp_cnt = self.estimated_tcp_cwnd / CUBIC_ALPHA;
while self.tcp_acked_bytes > tcp_cnt {
self.tcp_acked_bytes -= tcp_cnt;
self.estimated_tcp_cwnd += MAX_DATAGRAM_SIZE_F64;
}

let target_cwnd = target_cubic.max(self.estimated_tcp_cwnd);

// Calculate the number of bytes that would need to be acknowledged for an increase
// of `MAX_DATAGRAM_SIZE` to match the per-ACK increase of `(target - cwnd) / cwnd`
// defined in the specification (Sections 4.3 and 4.4 of RFC 8312, the concave and
// convex regions).
// The amount of acknowledged data required therefore shrinks as the target moves
// further above the congestion window. If the target is not significantly higher
// than the congestion window, a very large amount of acknowledged data is required,
// which effectively blocks increases.
let mut acked_to_increase =
MAX_DATAGRAM_SIZE_F64 * curr_cwnd_f64 / (target_cwnd - curr_cwnd_f64).max(1.0);

// Limit the increase to at most 1 MSS per `EXPONENTIAL_GROWTH_REDUCTION` datagrams'
// worth of acknowledged bytes. This effectively caps `target_cwnd` at
// (1 + 1 / EXPONENTIAL_GROWTH_REDUCTION) * cwnd.
acked_to_increase =
acked_to_increase.max(EXPONENTIAL_GROWTH_REDUCTION * MAX_DATAGRAM_SIZE_F64);
acked_to_increase as usize
}

fn reduce_cwnd(&mut self, curr_cwnd: usize, acked_bytes: usize) -> (usize, usize) {
let curr_cwnd_f64 = convert_to_f64(curr_cwnd);
// Fast convergence (Section 4.6 of RFC 8312):
// If a congestion event occurs before the window has regained the maximum reached
// before the previous congestion event, reduce the remembered maximum (and thereby
// W_max) further, to CUBIC_FAST_CONVERGENCE times the current window.
// Compare against cwnd + MAX_DATAGRAM_SIZE rather than cwnd because, with cwnd kept
// in bytes, cwnd may be slightly off.
self.last_max_cwnd = if curr_cwnd_f64 + MAX_DATAGRAM_SIZE_F64 < self.last_max_cwnd {
curr_cwnd_f64 * CUBIC_FAST_CONVERGENCE
} else {
curr_cwnd_f64
};
self.ca_epoch_start = None;
(
curr_cwnd * CUBIC_BETA_USIZE_QUOTIENT / CUBIC_BETA_USIZE_DIVISOR,
acked_bytes * CUBIC_BETA_USIZE_QUOTIENT / CUBIC_BETA_USIZE_DIVISOR,
)
}

fn on_app_limited(&mut self) {
// Reset ca_epoch_start. Let it start again when the congestion controller
// exits the app-limited period.
self.ca_epoch_start = None;
}

#[cfg(test)]
fn last_max_cwnd(&self) -> f64 {
self.last_max_cwnd
}

#[cfg(test)]
fn set_last_max_cwnd(&mut self, last_max_cwnd: f64) {
self.last_max_cwnd = last_max_cwnd;
}
}
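
To make the byte/seconds units above concrete, here is a standalone sketch (not part of the commit) that re-derives Eq. 1 and Eq. 2 exactly as `calc_k` and `w_cubic` implement them, assuming MSS = 1337 bytes to match `MAX_DATAGRAM_SIZE_F64`. The numbers are purely illustrative.

const C: f64 = 0.4;
const MSS: f64 = 1337.0;

fn calc_k(w_max: f64, cwnd_after_loss: f64) -> f64 {
    ((w_max - cwnd_after_loss) / C / MSS).cbrt()
}

fn w_cubic(t: f64, k: f64, w_max: f64) -> f64 {
    C * (t - k).powi(3) * MSS + w_max
}

fn main() {
    // A loss at cwnd = 140 kB: `reduce_cwnd` scales the window by 7/10 ...
    let w_max = 140_000.0;
    let cwnd = w_max * 0.7; // 98 kB
    // ... and K is the time the cubic curve needs to climb back to w_max.
    let k = calc_k(w_max, cwnd);
    assert!((k - 4.28).abs() < 0.01); // roughly 4.3 seconds
    // At t = K, W_cubic(K) is exactly w_max again, as Eq. 1 requires.
    assert!((w_cubic(k, k, w_max) - w_max).abs() < f64::EPSILON);
}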
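
The pacing rule at the end of `bytes_for_cwnd_increase` can be sketched the same way (again standalone and illustrative, not the commit's code): the returned value is how many acknowledged bytes are needed for a one-datagram window increase, so growth slows as cwnd approaches the cubic or TCP-friendly target and is capped at half the slow-start rate when the target is far away.

const MSS_F64: f64 = 1337.0;
const EXPONENTIAL_GROWTH_REDUCTION: f64 = 2.0;

fn acked_bytes_per_mss_increase(curr_cwnd: f64, target_cwnd: f64) -> f64 {
    (MSS_F64 * curr_cwnd / (target_cwnd - curr_cwnd).max(1.0))
        .max(EXPONENTIAL_GROWTH_REDUCTION * MSS_F64)
}

fn main() {
    // Far below the target: the cap applies, i.e. two datagrams' worth of acks
    // buy a one-datagram increase (half the slow-start growth rate).
    assert!((acked_bytes_per_mss_increase(13_370.0, 133_700.0) - 2.0 * MSS_F64).abs() < f64::EPSILON);
    // Just below the target: a full window of acks buys only one datagram.
    assert!(acked_bytes_per_mss_increase(100_000.0, 100_000.0 + MSS_F64) >= 100_000.0);
}
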
11 changes: 9 additions & 2 deletions neqo-transport/src/cc/mod.rs
@@ -15,13 +15,16 @@ use std::fmt::{Debug, Display};
use std::time::{Duration, Instant};

mod classic_cc;
mod cubic;
mod new_reno;

pub use classic_cc::ClassicCongestionControl;
pub use classic_cc::{CWND_INITIAL_PKTS, CWND_MIN};
pub use classic_cc::{CWND_INITIAL, CWND_INITIAL_PKTS, CWND_MIN};
pub use cubic::Cubic;
pub use new_reno::NewReno;

pub const MAX_DATAGRAM_SIZE: usize = PATH_MTU_V6;
// `MAX_DATAGRAM_SIZE` as an `f64`; keep the two constants in sync.
pub const MAX_DATAGRAM_SIZE_F64: f64 = 1337.0;

pub trait CongestionControl: Display + Debug {
fn set_qlog(&mut self, qlog: NeqoQlog);
@@ -32,7 +35,7 @@ pub trait CongestionControl: Display + Debug {

fn cwnd_avail(&self) -> usize;

fn on_packets_acked(&mut self, acked_pkts: &[SentPacket]);
fn on_packets_acked(&mut self, acked_pkts: &[SentPacket], min_rtt: Duration, now: Instant);

fn on_packets_lost(
&mut self,
@@ -52,4 +55,8 @@
#[derive(Debug, Copy, Clone)]
pub enum CongestionControlAlgorithm {
NewReno,
Cubic,
}

#[cfg(test)]
mod tests;
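
A hedged sketch of how a caller might wire these pieces together; the diff for classic_cc.rs is not shown, so `ClassicCongestionControl::new` and `NewReno::default()` are assumptions about its API rather than code from this commit. Note also that `on_packets_acked` now takes `min_rtt` and `now`, which Cubic needs in order to evaluate W_cubic(t + RTT).

fn congestion_control(alg: CongestionControlAlgorithm) -> Box<dyn CongestionControl> {
    match alg {
        CongestionControlAlgorithm::NewReno => {
            // Assumes ClassicCongestionControl is generic over WindowAdjustment.
            Box::new(ClassicCongestionControl::new(NewReno::default()))
        }
        CongestionControlAlgorithm::Cubic => {
            Box::new(ClassicCongestionControl::new(Cubic::default()))
        }
    }
}
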
31 changes: 21 additions & 10 deletions neqo-transport/src/cc/new_reno.rs
@@ -9,8 +9,8 @@

use std::fmt::{self, Display};

use crate::cc::{classic_cc::WindowAdjustment, MAX_DATAGRAM_SIZE};
use neqo_common::qinfo;
use crate::cc::classic_cc::WindowAdjustment;
use std::time::{Duration, Instant};

#[derive(Debug)]
pub struct NewReno {}
@@ -29,16 +29,27 @@ impl Display for NewReno {
}

impl WindowAdjustment for NewReno {
fn on_packets_acked(&mut self, mut curr_cwnd: usize, mut acked_bytes: usize) -> (usize, usize) {
if acked_bytes >= curr_cwnd {
acked_bytes -= curr_cwnd;
curr_cwnd += MAX_DATAGRAM_SIZE;
qinfo!([self], "congestion avoidance += {}", MAX_DATAGRAM_SIZE);
}
(curr_cwnd, acked_bytes)
fn bytes_for_cwnd_increase(
&mut self,
curr_cwnd: usize,
_new_acked_bytes: usize,
_min_rtt: Duration,
_now: Instant,
) -> usize {
curr_cwnd
}

fn on_congestion_event(&mut self, curr_cwnd: usize, acked_bytes: usize) -> (usize, usize) {
fn reduce_cwnd(&mut self, curr_cwnd: usize, acked_bytes: usize) -> (usize, usize) {
(curr_cwnd / 2, acked_bytes / 2)
}

fn on_app_limited(&mut self) {}

#[cfg(test)]
fn last_max_cwnd(&self) -> f64 {
0.0
}

#[cfg(test)]
fn set_last_max_cwnd(&mut self, _last_max_cwnd: f64) {}
}
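
Returning `curr_cwnd` from `bytes_for_cwnd_increase` preserves the behaviour of the removed `on_packets_acked`: one datagram of window growth per full window of acknowledged bytes. The loop below is a standalone model of the caller (classic_cc, whose diff is not shown, so its exact shape is an assumption), using 1337 bytes for the datagram size to match `MAX_DATAGRAM_SIZE_F64` in mod.rs above.

const MAX_DATAGRAM_SIZE: usize = 1337;

fn apply_acks(mut cwnd: usize, mut acked_bytes: usize, bytes_for_increase: usize) -> usize {
    // Assumed caller behaviour: grow by one MSS per `bytes_for_increase` acked.
    while acked_bytes >= bytes_for_increase {
        acked_bytes -= bytes_for_increase;
        cwnd += MAX_DATAGRAM_SIZE;
    }
    cwnd
}

fn main() {
    let cwnd = 10 * MAX_DATAGRAM_SIZE;
    // With New Reno, one full window of acks grows the window by exactly one
    // datagram, i.e. once per round trip.
    assert_eq!(apply_acks(cwnd, cwnd, cwnd), cwnd + MAX_DATAGRAM_SIZE);
}
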
(Diffs for the remaining changed files are not shown.)
