diff --git a/Cargo.toml b/Cargo.toml index 848f8ed..787f180 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,37 +1,4 @@ -[package] -name = "chuck" -version = "0.0.1" -edition = "2021" -authors = ["arqu "] -license = "Apache-2.0/MIT" -repository = "https://github.com/n0-computer/chuck" -description = "iroh test tools" -rust-version = "1.65" +[workspace] +members = ["chuck"] -[dependencies] -anyhow = { version = "1", features = ["backtrace"] } -clap = { version = "4.0.9", features = ["derive"] } -tokio = { version = "1", features = ["full"] } -serde = { version = "1", features = ["derive"] } -bincode = "1.3.3" -tempfile = "3.4.0" -futures = "0.3.21" -bytes = "1.1.0" -rand = "0.8.5" - -axum = "0.6.2" -tower = { version = "0.4", features = ["util"] } -tower-http = { version = "0.3.0", features = ["fs", "trace"] } -tracing = "0.1" -tracing-subscriber = { version = "0.3", features = ["env-filter"] } -axum-server = { version = "0.3", features = ["tls-rustls"] } -reqwest = { version = "0.11.10", default-features = false, features = ["rustls-tls"] } - -#iroh-api = { git = "https://github.com/n0-computer/iroh", rev = "0e06dd7f70e21955f735ecd54464c7527fbf72c1"} -#iroh-util = { git = "https://github.com/n0-computer/iroh", rev = "0e06dd7f70e21955f735ecd54464c7527fbf72c1"} -#iroh-share = { git = "https://github.com/n0-computer/iroh", rev = "0e06dd7f70e21955f735ecd54464c7527fbf72c1"} -#iroh-one = { git = "https://github.com/n0-computer/iroh", rev = "0e06dd7f70e21955f735ecd54464c7527fbf72c1"} -#iroh-rpc-types = { git = "https://github.com/n0-computer/iroh", rev = "0e06dd7f70e21955f735ecd54464c7527fbf72c1", default-features = false} -#iroh-rpc-client = { git = "https://github.com/n0-computer/iroh", rev = "0e06dd7f70e21955f735ecd54464c7527fbf72c1", default-features = false} - -iroh = { git = "https://github.com/n0-computer/iroh", rev = "2149bc8c6819b044833f5af0b8fdac567ef93650", default-features = false } \ No newline at end of file +resolver = "2" \ No newline at end of file diff 
--git a/chuck/Cargo.toml b/chuck/Cargo.toml new file mode 100644 index 0000000..848f8ed --- /dev/null +++ b/chuck/Cargo.toml @@ -0,0 +1,37 @@ +[package] +name = "chuck" +version = "0.0.1" +edition = "2021" +authors = ["arqu "] +license = "Apache-2.0/MIT" +repository = "https://github.com/n0-computer/chuck" +description = "iroh test tools" +rust-version = "1.65" + +[dependencies] +anyhow = { version = "1", features = ["backtrace"] } +clap = { version = "4.0.9", features = ["derive"] } +tokio = { version = "1", features = ["full"] } +serde = { version = "1", features = ["derive"] } +bincode = "1.3.3" +tempfile = "3.4.0" +futures = "0.3.21" +bytes = "1.1.0" +rand = "0.8.5" + +axum = "0.6.2" +tower = { version = "0.4", features = ["util"] } +tower-http = { version = "0.3.0", features = ["fs", "trace"] } +tracing = "0.1" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } +axum-server = { version = "0.3", features = ["tls-rustls"] } +reqwest = { version = "0.11.10", default-features = false, features = ["rustls-tls"] } + +#iroh-api = { git = "https://github.com/n0-computer/iroh", rev = "0e06dd7f70e21955f735ecd54464c7527fbf72c1"} +#iroh-util = { git = "https://github.com/n0-computer/iroh", rev = "0e06dd7f70e21955f735ecd54464c7527fbf72c1"} +#iroh-share = { git = "https://github.com/n0-computer/iroh", rev = "0e06dd7f70e21955f735ecd54464c7527fbf72c1"} +#iroh-one = { git = "https://github.com/n0-computer/iroh", rev = "0e06dd7f70e21955f735ecd54464c7527fbf72c1"} +#iroh-rpc-types = { git = "https://github.com/n0-computer/iroh", rev = "0e06dd7f70e21955f735ecd54464c7527fbf72c1", default-features = false} +#iroh-rpc-client = { git = "https://github.com/n0-computer/iroh", rev = "0e06dd7f70e21955f735ecd54464c7527fbf72c1", default-features = false} + +iroh = { git = "https://github.com/n0-computer/iroh", rev = "2149bc8c6819b044833f5af0b8fdac567ef93650", default-features = false } \ No newline at end of file diff --git a/src/lib.rs b/chuck/src/lib.rs similarity index 
100% rename from src/lib.rs rename to chuck/src/lib.rs diff --git a/src/main.rs b/chuck/src/main.rs similarity index 100% rename from src/main.rs rename to chuck/src/main.rs diff --git a/src/service.rs b/chuck/src/service.rs similarity index 100% rename from src/service.rs rename to chuck/src/service.rs diff --git a/netsim/README.md b/netsim/README.md index 2a7d0d4..04efaae 100644 --- a/netsim/README.md +++ b/netsim/README.md @@ -1,15 +1,29 @@ # Netsim ## Requirements +- `sudo ./setup.sh` +- Linux machine, tested on ubuntu 20.04 & 22.04 -- `ubuntu` -- `python3` -- `sudo apt install mininet` -- `sudo apt install openvswitch-testcontroller` -- `/var/lib/netsim` -- `dd if=/dev/urandom of=128MB.bin bs=64M count=2 iflag=fullblock` -- `dd if=/dev/urandom of=1GB.bin bs=64M count=16 iflag=fullblock` -- `iperf` +### Warning: +- Requires root access +- Dirties your system python dependencies + +## Locally + +You can also do this locally on a linux machine. With a few modifications you don’t even need to do much as root: + +- clone [**iroh**](https://github.com/n0-computer/iroh) and [**chuck**](https://github.com/n0-computer/chuck) into the same directory. +- Check `chuck/netsim/scripts/ubuntu_deps.sh` to find what the system dependencies are +- `cd chuck/netsim` +- Create a virtual environment: + - `python -m venv .venv` + - `source .venv/bin/activate` + - `./scripts/python_deps.sh` +- Run `./scripts/project_deps` to configure the project structure and generate fixtures +- Continue in the iroh repo: build iroh and copy it into chuck/netsim/bins. I found building in release mode to be sufficient, not release-optimized like what CI does. 
`cargo build --release && cp ./target/release/iroh ../chuck/netsim/bins/` + - Do not run kill, these are services managed by systemd + - Run `main.py` as root, using the python from your virtualenv: + - `./.venv/bin/python main.py --integrations sims/mini` ## Run @@ -19,4 +33,4 @@ ## Notes - `https://github.com/mininet/mininet/wiki/Introduction-to-Mininet` -- `sudo kill -9 $(pgrep ovs)` \ No newline at end of file +- `sudo kill -9 $(pgrep ovs)` - when stuck with weird errors due to the process failing midway on a simulation \ No newline at end of file diff --git a/netsim/cleanup.sh b/netsim/cleanup.sh index 8343eee..6579c3f 100755 --- a/netsim/cleanup.sh +++ b/netsim/cleanup.sh @@ -4,4 +4,15 @@ rm -rf viz/* sudo mn --clean sudo kill -9 $(pgrep iroh) sudo kill -9 $(pgrep derper) -sudo kill -9 $(pgrep ovs) \ No newline at end of file +sudo kill -9 $(pgrep ovs) + + +# we use shorter interface names due to size constraints, which means we need to have a custom cleanup too +echo "Cleaning up interfaces" +links=$(ip link show | egrep -o '([-_.[:alnum:]]+-e[[:digit:]]+)') +for link in $links; do + # Run your command with $link as a parameter + echo "Deleting $link" + # Example command: sudo ip link delete $link + sudo ip link delete $link +done \ No newline at end of file diff --git a/netsim/link.py b/netsim/link.py deleted file mode 100644 index 148419d..0000000 --- a/netsim/link.py +++ /dev/null @@ -1,583 +0,0 @@ -""" -link.py: interface and link abstractions for mininet - -It seems useful to bundle functionality for interfaces into a single -class. - -Also it seems useful to enable the possibility of multiple flavors of -links, including: - -- simple veth pairs -- tunneled links -- patchable links (which can be disconnected and reconnected via a patchbay) -- link simulators (e.g. 
wireless) - -Basic division of labor: - - Nodes: know how to execute commands - Intfs: know how to configure themselves - Links: know how to connect nodes together - -Intf: basic interface object that can configure itself -TCIntf: interface with bandwidth limiting and delay via tc - -Link: basic link class for creating veth pairs -""" - -import re - -from mininet.log import info, error, debug -from mininet.util import makeIntfPair - -# Make pylint happy: -# pylint: disable=too-many-arguments - - -class Intf( object ): - - "Basic interface object that can configure itself." - - def __init__( self, name, node=None, port=None, link=None, - mac=None, **params ): - """name: interface name (e.g. h1-eth0) - node: owning node (where this intf most likely lives) - link: parent link if we're part of a link - other arguments are passed to config()""" - self.node = node - self.name = name - self.link = link - self.mac = mac - self.ip, self.prefixLen = None, None - - # if interface is lo, we know the ip is 127.0.0.1. 
- # This saves an ifconfig command per node - if self.name == 'lo': - self.ip = '127.0.0.1' - self.prefixLen = 8 - # Add to node (and move ourselves if necessary ) - if node: - moveIntfFn = params.pop( 'moveIntfFn', None ) - if moveIntfFn: - node.addIntf( self, port=port, moveIntfFn=moveIntfFn ) - else: - node.addIntf( self, port=port ) - # Save params for future reference - self.params = params - self.config( **params ) - - def cmd( self, *args, **kwargs ): - "Run a command in our owning node" - return self.node.cmd( *args, **kwargs ) - - def ifconfig( self, *args ): - "Configure ourselves using ifconfig" - return self.cmd( 'ifconfig', self.name, *args ) - - def setIP( self, ipstr, prefixLen=None ): - """Set our IP address""" - # This is a sign that we should perhaps rethink our prefix - # mechanism and/or the way we specify IP addresses - if '/' in ipstr: - self.ip, self.prefixLen = ipstr.split( '/' ) - return self.ifconfig( ipstr, 'up' ) - else: - if prefixLen is None: - raise Exception( 'No prefix length set for IP address %s' - % ( ipstr, ) ) - self.ip, self.prefixLen = ipstr, prefixLen - return self.ifconfig( '%s/%s' % ( ipstr, prefixLen ) ) - - def setMAC( self, macstr ): - """Set the MAC address for an interface. - macstr: MAC address as string""" - self.mac = macstr - return ( self.ifconfig( 'down' ) + - self.ifconfig( 'hw', 'ether', macstr ) + - self.ifconfig( 'up' ) ) - - _ipMatchRegex = re.compile( r'\d+\.\d+\.\d+\.\d+' ) - _macMatchRegex = re.compile( r'..:..:..:..:..:..' ) - - def updateIP( self ): - "Return updated IP address based on ifconfig" - # use pexec instead of node.cmd so that we dont read - # backgrounded output from the cli. 
- ifconfig, _err, _exitCode = self.node.pexec( - 'ifconfig %s' % self.name ) - ips = self._ipMatchRegex.findall( ifconfig ) - self.ip = ips[ 0 ] if ips else None - return self.ip - - def updateMAC( self ): - "Return updated MAC address based on ifconfig" - ifconfig = self.ifconfig() - macs = self._macMatchRegex.findall( ifconfig ) - self.mac = macs[ 0 ] if macs else None - return self.mac - - # Instead of updating ip and mac separately, - # use one ifconfig call to do it simultaneously. - # This saves an ifconfig command, which improves performance. - - def updateAddr( self ): - "Return IP address and MAC address based on ifconfig." - ifconfig = self.ifconfig() - ips = self._ipMatchRegex.findall( ifconfig ) - macs = self._macMatchRegex.findall( ifconfig ) - self.ip = ips[ 0 ] if ips else None - self.mac = macs[ 0 ] if macs else None - return self.ip, self.mac - - def IP( self ): - "Return IP address" - return self.ip - - def MAC( self ): - "Return MAC address" - return self.mac - - def isUp( self, setUp=False ): - "Return whether interface is up" - if setUp: - cmdOutput = self.ifconfig( 'up' ) - # no output indicates success - if cmdOutput: - error( "Error setting %s up: %s " % ( self.name, cmdOutput ) ) - return False - else: - return True - else: - return "UP" in self.ifconfig() - - def rename( self, newname ): - "Rename interface" - if self.node and self.name in self.node.nameToIntf: - # rename intf in node's nameToIntf - self.node.nameToIntf[newname] = self.node.nameToIntf.pop(self.name) - self.ifconfig( 'down' ) - result = self.cmd( 'ip link set', self.name, 'name', newname ) - self.name = newname - self.ifconfig( 'up' ) - return result - - # The reason why we configure things in this way is so - # That the parameters can be listed and documented in - # the config method. - # Dealing with subclasses and superclasses is slightly - # annoying, but at least the information is there! 
- - def setParam( self, results, method, **param ): - """Internal method: configure a *single* parameter - results: dict of results to update - method: config method name - param: arg=value (ignore if value=None) - value may also be list or dict""" - name, value = list( param.items() )[ 0 ] - f = getattr( self, method, None ) - if not f or value is None: - return None - if isinstance( value, list ): - result = f( *value ) - elif isinstance( value, dict ): - result = f( **value ) - else: - result = f( value ) - results[ name ] = result - return result - - def config( self, mac=None, ip=None, ifconfig=None, - up=True, **_params ): - """Configure Node according to (optional) parameters: - mac: MAC address - ip: IP address - ifconfig: arbitrary interface configuration - Subclasses should override this method and call - the parent class's config(**params)""" - # If we were overriding this method, we would call - # the superclass config method here as follows: - # r = Parent.config( **params ) - r = {} - self.setParam( r, 'setMAC', mac=mac ) - self.setParam( r, 'setIP', ip=ip ) - self.setParam( r, 'isUp', up=up ) - self.setParam( r, 'ifconfig', ifconfig=ifconfig ) - return r - - def delete( self ): - "Delete interface" - self.cmd( 'ip link del ' + self.name ) - # We used to do this, but it slows us down: - # if self.node.inNamespace: - # Link may have been dumped into root NS - # quietRun( 'ip link del ' + self.name ) - self.node.delIntf( self ) - self.link = None - - def status( self ): - "Return intf status as a string" - links, _err, _result = self.node.pexec( 'ip link show' ) - if self.name in links: - return "OK" - else: - return "MISSING" - - def __repr__( self ): - return '<%s %s>' % ( self.__class__.__name__, self.name ) - - def __str__( self ): - return self.name - - -class TCIntf( Intf ): - """Interface customized by tc (traffic control) utility - Allows specification of bandwidth limits (various methods) - as well as delay, loss and max queue length""" - - # 
The parameters we use seem to work reasonably up to 1 Gb/sec - # For higher data rates, we will probably need to change them. - bwParamMax = 100000000 - - def bwCmds( self, bw=None, speedup=0, use_hfsc=False, use_tbf=False, - latency_ms=None, enable_ecn=False, enable_red=False ): - "Return tc commands to set bandwidth" - - cmds, parent = [], ' root ' - - if bw and ( bw < 0 or bw > self.bwParamMax ): - error( 'Bandwidth limit', bw, 'is outside supported range 0..%d' - % self.bwParamMax, '- ignoring\n' ) - elif bw is not None: - # BL: this seems a bit brittle... - if ( speedup > 0 and - self.node.name[0:1] == 's' ): - bw = speedup - # This may not be correct - we should look more closely - # at the semantics of burst (and cburst) to make sure we - # are specifying the correct sizes. For now I have used - # the same settings we had in the mininet-hifi code. - if use_hfsc: - cmds += [ '%s qdisc add dev %s root handle 5:0 hfsc default 1', - '%s class add dev %s parent 5:0 classid 5:1 hfsc sc ' - + 'rate %fMbit ul rate %fMbit' % ( bw, bw ) ] - elif use_tbf: - if latency_ms is None: - latency_ms = 15.0 * 8 / bw - cmds += [ '%s qdisc add dev %s root handle 5: tbf ' + - 'rate %fMbit burst 15000 latency %fms' % - ( bw, latency_ms ) ] - else: - cmds += [ '%s qdisc add dev %s root handle 5:0 htb default 1', - '%s class add dev %s parent 5:0 classid 5:1 htb ' + - 'rate %fMbit burst 15k' % bw ] - parent = ' parent 5:1 ' - - # ECN or RED - if enable_ecn: - cmds += [ '%s qdisc add dev %s' + parent + - 'handle 6: red limit 1000000 ' + - 'min 30000 max 35000 avpkt 1500 ' + - 'burst 20 ' + - 'bandwidth %fmbit probability 1 ecn' % bw ] - parent = ' parent 6: ' - elif enable_red: - cmds += [ '%s qdisc add dev %s' + parent + - 'handle 6: red limit 1000000 ' + - 'min 30000 max 35000 avpkt 1500 ' + - 'burst 20 ' + - 'bandwidth %fmbit probability 1' % bw ] - parent = ' parent 6: ' - return cmds, parent - - @staticmethod - def delayCmds( parent, delay=None, jitter=None, - loss=None, 
max_queue_size=None ): - "Internal method: return tc commands for delay and loss" - cmds = [] - if loss and ( loss < 0 or loss > 100 ): - error( 'Bad loss percentage', loss, '%%\n' ) - else: - # Delay/jitter/loss/max queue size - netemargs = '%s%s%s%s' % ( - 'delay %s ' % delay if delay is not None else '', - '%s ' % jitter if jitter is not None else '', - 'loss %.5f ' % loss if (loss is not None and loss > 0) else '', - 'limit %d' % max_queue_size if max_queue_size is not None - else '' ) - if netemargs: - cmds = [ '%s qdisc add dev %s ' + parent + - ' handle 10: netem ' + - netemargs ] - parent = ' parent 10:1 ' - return cmds, parent - - def tc( self, cmd, tc='tc' ): - "Execute tc command for our interface" - c = cmd % (tc, self) # Add in tc command and our name - debug(" *** executing command: %s\n" % c) - return self.cmd( c ) - - # pylint: disable=arguments-differ - def config( self, bw=None, delay=None, jitter=None, loss=None, - gro=False, txo=True, rxo=True, - speedup=0, use_hfsc=False, use_tbf=False, - latency_ms=None, enable_ecn=False, enable_red=False, - max_queue_size=None, **params ): - """Configure the port and set its properties. - bw: bandwidth in b/s (e.g. '10m') - delay: transmit delay (e.g. '1ms' ) - jitter: jitter (e.g. '1ms') - loss: loss (e.g. 
'1%' ) - gro: enable GRO (False) - txo: enable transmit checksum offload (True) - rxo: enable receive checksum offload (True) - speedup: experimental switch-side bw option - use_hfsc: use HFSC scheduling - use_tbf: use TBF scheduling - latency_ms: TBF latency parameter - enable_ecn: enable ECN (False) - enable_red: enable RED (False) - max_queue_size: queue limit parameter for netem""" - - # Support old names for parameters - gro = not params.pop( 'disable_gro', not gro ) - - result = Intf.config( self, **params) - - def on( isOn ): - "Helper method: bool -> 'on'/'off'" - return 'on' if isOn else 'off' - - # Set offload parameters with ethool - self.cmd( 'ethtool -K', self, - 'gro', on( gro ), - 'tx', on( txo ), - 'rx', on( rxo ) ) - - # Optimization: return if nothing else to configure - # Question: what happens if we want to reset things? - if ( bw is None and not delay and not loss - and max_queue_size is None ): - return None - - # Clear existing configuration - tcoutput = self.tc( '%s qdisc show dev %s' ) - if "priomap" not in tcoutput and "noqueue" not in tcoutput: - cmds = [ '%s qdisc del dev %s root' ] - else: - cmds = [] - - # Bandwidth limits via various methods - bwcmds, parent = self.bwCmds( bw=bw, speedup=speedup, - use_hfsc=use_hfsc, use_tbf=use_tbf, - latency_ms=latency_ms, - enable_ecn=enable_ecn, - enable_red=enable_red ) - cmds += bwcmds - - # Delay/jitter/loss/max_queue_size using netem - delaycmds, parent = self.delayCmds( delay=delay, jitter=jitter, - loss=loss, - max_queue_size=max_queue_size, - parent=parent ) - cmds += delaycmds - - # Ugly but functional: display configuration info - stuff = ( ( [ '%.2fMbit' % bw ] if bw is not None else [] ) + - ( [ '%s delay' % delay ] if delay is not None else [] ) + - ( [ '%s jitter' % jitter ] if jitter is not None else [] ) + - ( ['%.5f%% loss' % loss ] if loss is not None else [] ) + - ( [ 'ECN' ] if enable_ecn else [ 'RED' ] - if enable_red else [] ) ) - info( '(' + ' '.join( stuff ) + ') ' ) - - # 
Execute all the commands in our node - debug("at map stage w/cmds: %s\n" % cmds) - tcoutputs = [ self.tc(cmd) for cmd in cmds ] - for output in tcoutputs: - if output != '': - error( "*** Error: %s" % output ) - debug( "cmds:", cmds, '\n' ) - debug( "outputs:", tcoutputs, '\n' ) - result[ 'tcoutputs'] = tcoutputs - result[ 'parent' ] = parent - - return result - - -class Link( object ): - - """A basic link is just a veth pair. - Other types of links could be tunnels, link emulators, etc..""" - - # pylint: disable=too-many-branches - def __init__( self, node1, node2, port1=None, port2=None, - intfName1=None, intfName2=None, addr1=None, addr2=None, - intf=Intf, cls1=None, cls2=None, params1=None, - params2=None, fast=True, **params ): - """Create veth link to another node, making two new interfaces. - node1: first node - node2: second node - port1: node1 port number (optional) - port2: node2 port number (optional) - intf: default interface class/constructor - cls1, cls2: optional interface-specific constructors - intfName1: node1 interface name (optional) - intfName2: node2 interface name (optional) - params1: parameters for interface 1 (optional) - params2: parameters for interface 2 (optional) - **params: additional parameters for both interfaces""" - - # This is a bit awkward; it seems that having everything in - # params is more orthogonal, but being able to specify - # in-line arguments is more convenient! So we support both. 
- params1 = dict( params1 ) if params1 else {} - params2 = dict( params2 ) if params2 else {} - if port1 is not None: - params1[ 'port' ] = port1 - if port2 is not None: - params2[ 'port' ] = port2 - if 'port' not in params1: - params1[ 'port' ] = node1.newPort() - if 'port' not in params2: - params2[ 'port' ] = node2.newPort() - if not intfName1: - intfName1 = self.intfName( node1, params1[ 'port' ] ) - if not intfName2: - intfName2 = self.intfName( node2, params2[ 'port' ] ) - - # Update with remaining parameter list - params1.update( params ) - params2.update( params ) - - self.fast = fast - if fast: - params1.setdefault( 'moveIntfFn', self._ignore ) - params2.setdefault( 'moveIntfFn', self._ignore ) - self.makeIntfPair( intfName1, intfName2, addr1, addr2, - node1, node2, deleteIntfs=False ) - else: - self.makeIntfPair( intfName1, intfName2, addr1, addr2 ) - - if not cls1: - cls1 = intf - if not cls2: - cls2 = intf - - intf1 = cls1( name=intfName1, node=node1, - link=self, mac=addr1, **params1 ) - intf2 = cls2( name=intfName2, node=node2, - link=self, mac=addr2, **params2 ) - - # All we are is dust in the wind, and our two interfaces - self.intf1, self.intf2 = intf1, intf2 - - # pylint: enable=too-many-branches - - @staticmethod - def _ignore( *args, **kwargs ): - "Ignore any arguments" - pass - - def intfName( self, node, n ): - "Construct a canonical interface name node-ethN for interface n." 
- # Leave this as an instance method for now - assert self - return node.name + '-eth' + repr( n ) - - @classmethod - def makeIntfPair( cls, intfname1, intfname2, addr1=None, addr2=None, - node1=None, node2=None, deleteIntfs=True ): - """Create pair of interfaces - intfname1: name for interface 1 - intfname2: name for interface 2 - addr1: MAC address for interface 1 (optional) - addr2: MAC address for interface 2 (optional) - node1: home node for interface 1 (optional) - node2: home node for interface 2 (optional) - (override this method [and possibly delete()] - to change link type)""" - # Leave this as a class method for now - assert cls - return makeIntfPair( intfname1, intfname2, addr1, addr2, node1, node2, - deleteIntfs=deleteIntfs ) - - def delete( self ): - "Delete this link" - self.intf1.delete() - self.intf1 = None - self.intf2.delete() - self.intf2 = None - - def stop( self ): - "Override to stop and clean up link as needed" - self.delete() - - def status( self ): - "Return link status as a string" - return "(%s %s)" % ( self.intf1.status(), self.intf2.status() ) - - def __str__( self ): - return '%s<->%s' % ( self.intf1, self.intf2 ) - - -class OVSIntf( Intf ): - "Patch interface on an OVSSwitch" - - def ifconfig( self, *args ): - cmd = ' '.join( args ) - if cmd == 'up': - # OVSIntf is always up - return - else: - raise Exception( 'OVSIntf cannot do ifconfig ' + cmd ) - - -class OVSLink( Link ): - """Link that makes patch links between OVSSwitches - Warning: in testing we have found that no more - than ~64 OVS patch links should be used in row.""" - - def __init__( self, node1, node2, **kwargs ): - "See Link.__init__() for options" - try: - OVSSwitch - except NameError: - # pylint: disable=import-outside-toplevel,cyclic-import - from mininet.node import OVSSwitch - self.isPatchLink = False - if ( isinstance( node1, OVSSwitch ) and - isinstance( node2, OVSSwitch ) ): - self.isPatchLink = True - kwargs.update( cls1=OVSIntf, cls2=OVSIntf ) - Link.__init__( 
self, node1, node2, **kwargs ) - - # pylint: disable=arguments-differ, signature-differs - def makeIntfPair( self, *args, **kwargs ): - "Usually delegated to OVSSwitch" - if self.isPatchLink: - return None, None - else: - return Link.makeIntfPair( *args, **kwargs ) - - -class TCLink( Link ): - "Link with TC interfaces" - def __init__( self, *args, **kwargs): - kwargs.setdefault( 'cls1', TCIntf ) - kwargs.setdefault( 'cls2', TCIntf ) - Link.__init__( self, *args, **kwargs) - - -class TCULink( TCLink ): - """TCLink with default settings optimized for UserSwitch - (txo=rxo=0/False). Unfortunately with recent Linux kernels, - enabling TX and RX checksum offload on veth pairs doesn't work - well with UserSwitch: either it gets terrible performance or - TCP packets with bad checksums are generated, forwarded, and - *dropped* due to having bad checksums! OVS and LinuxBridge seem - to cope with this somehow, but it is likely to be an issue with - many software Ethernet bridges.""" - - def __init__( self, *args, **kwargs ): - kwargs.update( txo=False, rxo=False ) - TCLink.__init__( self, *args, **kwargs ) diff --git a/netsim/main.py b/netsim/main.py index 0f2ea2c..0ce8dae 100644 --- a/netsim/main.py +++ b/netsim/main.py @@ -1,257 +1,296 @@ import argparse +import concurrent.futures +import glob import json -import subprocess +import os +import sys import tempfile import time -import os +from mininet.log import setLogLevel, info from mininet.net import Mininet -from sniff import Sniffer -from link import TCLink -from mininet.log import setLogLevel -from netsim_parser import stats_parser -from netsim_parser import integration_parser -from network import StarTopo -from process_sniff import run_viz +from net.link import TCLink +from net.network import StarTopo +from parsing.netsim import process_logs, process_integration_logs +from sniffer.sniff import Sniffer +from sniffer.process import run_viz +from util import cleanup_tmp_dirs, eject TIMEOUT = 60 * 5 -def 
logs_on_error(nodes, prefix, code=1, message=None): - node_counts = {} + +def setup_env_vars(prefix, node_name, temp_dir, debug=False): + env_vars = os.environ.copy() + env_vars["RUST_LOG_STYLE"] = "never" + env_vars["SSLKEYLOGFILE"] = f"./logs/keylog_{prefix}_{node_name}.txt" + env_vars["IROH_DATA_DIR"] = f"{temp_dir}" + if debug: + env_vars["RUST_LOG"] = "debug" + if not "RUST_LOG" in env_vars: + env_vars["RUST_LOG"] = "warn" + env_vars["RUST_LOG"] += ",iroh_net::magicsock::node_map::endpoint=trace" + return env_vars + + +def parse_node_params(node, prefix, node_params, runner_id): + node_params = {} + wait_time = node.get("wait", 1) + for _ in range(wait_time): + time.sleep(1) + for i in range(int(node["count"])): + node_name = f'{node["name"]}_{i}_r{runner_id}' + with open(f"logs/{prefix}__{node_name}.txt", "r") as f: + for line in f: + if node["param_parser"] == "iroh_ticket" and line.startswith( + "All-in-one ticket" + ): + node_params[node_name] = line[ + len("All-in-one ticket: ") : + ].strip() + break + return node_params + + +def terminate_processes(p_box): + for p in p_box: + p.terminate() + + +def monitor_short_processes(p_short_box, prefix): + process_errors = [] + for _ in range(TIMEOUT): + time.sleep(1) + if not any(p.poll() is None for (_, p) in p_short_box): + break + for node_name, p in p_short_box: + result = p.poll() + if result is None: + p.terminate() + process_errors.append(f"Process timeout: {prefix} for node {node_name}") + elif result != 0: + process_errors.append( + f"Process failed: {prefix} with exit code {result} for node {node_name}" + ) + return process_errors + + +def handle_connection_strategy(node, node_counts, i, runner_id, node_ips, node_params): + cmd = node["cmd"] + if "param" in node: + if node["param"] == "id": + cmd = cmd % i + strategy = node["connect"]["strategy"] + if strategy in ("plain", "plain_with_id", "params"): + node_name = node["connect"]["node"] + if not (node_name in node_counts): + raise ValueError(f"Node not 
found for: {node_name}") + cnt = node_counts[node_name] + id = i % cnt + connect_to = f"{node_name}_{id}_r{runner_id}" + if connect_to not in node_ips: + raise ValueError(f"Connecting node not found for: {connect_to}") + if strategy == "plain": + ip = node_ips[connect_to] + return cmd % ip + if strategy == "plain_with_id": + ip = node_ips[connect_to] + return cmd % (ip, id) + if strategy == "params": + param = node_params[connect_to] + return cmd % param + return cmd + + +def execute_node_command(cmd, prefix, node_name, n, env_vars): + log_path = f"logs/{prefix}__{node_name}.txt" + with open(log_path, "w+") as f: + f.write(f"cmd: {cmd}\n\n") + f.flush() + return n.popen(cmd, stdout=f, stderr=f, shell=True, env=env_vars) + + +def get_node_ips(net, nodes, runner_id): + node_ips = {} for node in nodes: - node_counts[node['name']] = int(node['count']) - for i in range(int(node['count'])): - node_name = '%s_%d' % (node['name'], i) - log_name= 'logs/%s__%s.txt' % (prefix, node_name) - if os.path.isfile(log_name): - print('\n\n[INFO] Log file: %s' % log_name) - f = open(log_name, 'r') - lines = f.readlines() - for line in lines: - print('[INFO][%s__%s] %s' % (prefix, node_name, line.rstrip())) - else: - print('[WARN] log file missing: %s' % log_name) - print('[ERROR] Process has failed with code:', code) - if message: - print('[ERROR] Message:', message) - -def run(nodes, prefix, args, debug=False, visualize=False): - integration = args.integration - topo = StarTopo(nodes=nodes) - net = Mininet(topo = topo, waitConnected=True, link=TCLink) - net.start() + for i in range(int(node["count"])): + node_name = f'{node["name"]}_{i}_r{runner_id}' + n = net.get(node_name) + node_ips[node_name] = n.IP() + return node_ips + +def prep_net(net, prefix, sniff): sniffer = Sniffer(net=net, output="logs/" + prefix + ".pcap") ti = sniffer.get_topoinfo() - - print( "Testing network connectivity" ) + info("Testing network connectivity") net.pingAll() - - print("Topo:", json.dumps(ti, 
indent=4)) - # if args.sniff or visualize: - # print( "Attaching sniffer" ) - # sniffer.start() - # f = open("logs/" + prefix + ".topo.json", "w+") - # f.write(json.dumps(ti, indent=4)) - # f.close() - - time.sleep(1) - env_vars = os.environ.copy() - if debug: - env_vars['RUST_LOG'] = 'debug' - if not 'RUST_LOG' in env_vars: - env_vars['RUST_LOG'] = 'warn' - # magicsock::endpoint required for iroh integration tests - env_vars['RUST_LOG'] += ",iroh_net::magicsock::node_map::endpoint=trace" - + info("Topology:", json.dumps(ti, indent=4)) + if sniff: + info("Attaching sniffer") + sniffer.start() + with open(f"logs/{prefix}.topo.json", "w+") as f: + f.write(json.dumps(ti, indent=4)) + return sniffer - p_box = [] - p_short_box = [] - node_counts = {} - node_ips = {} - node_params = {} +def run_case(nodes, runner_id, prefix, args, debug=False, visualize=False): + topo = StarTopo(nodes=nodes, runner_id=runner_id) + net = Mininet(topo=topo, waitConnected=True, link=TCLink) + net.start() + sniffer = prep_net(net, prefix, args.sniff | visualize) + p_box, p_short_box = [], [] temp_dirs = [] + node_counts = {node["name"]: int(node["count"]) for node in nodes} + node_ips = get_node_ips(net, nodes, runner_id) + node_params = {} + for node in nodes: - node_counts[node['name']] = int(node['count']) - for i in range(int(node['count'])): - node_name = '%s_%d' % (node['name'], i) - f = open('logs/%s__%s.txt' % (prefix, node_name), 'w+') + for i in range(int(node["count"])): + node_name = f'{node["name"]}_{i}_r{runner_id}' n = net.get(node_name) - node_ips[node_name] = n.IP() - cmd = node['cmd'] - if 'param' in node: - if node['param'] == 'id': - cmd = cmd % i - if node['connect']['strategy'] == 'plain': - cnt = node_counts[node['connect']['node']] - id = i % cnt - connect_to = '%s_%d' % (node['connect']['node'], id) - if not (connect_to in node_ips): - logs_on_error(nodes, prefix) - cleanup_tmp_dirs(temp_dirs) - print('Node not found for node: %s' % connect_to) - raise 
Exception('Netsim run failed') - ip = node_ips[connect_to] - cmd = cmd % ip - if node['connect']['strategy'] == 'plain_with_id': - cnt = node_counts[node['connect']['node']] - id = i % cnt - connect_to = '%s_%d' % (node['connect']['node'], id) - if not (connect_to in node_ips): - logs_on_error(nodes, prefix) - cleanup_tmp_dirs(temp_dirs) - print('Node not found for node: %s' % connect_to) - raise Exception('Netsim run failed') - ip = node_ips[connect_to] - cmd = cmd % (ip, id) - if node['connect']['strategy'] == 'params': - cnt = node_counts[node['connect']['node']] - id = i % cnt - connect_to = '%s_%d' % (node['connect']['node'], id) - if not (connect_to in node_params): - logs_on_error(nodes, prefix) - cleanup_tmp_dirs(temp_dirs) - print('Node not found for node: %s' % connect_to) - raise Exception('Netsim run failed') - param = node_params[connect_to] - cmd = cmd % (param) - # cleanup_run = subprocess.run("sudo rm -rf /root/.local/share/iroh", shell=True, capture_output=True) - time.sleep(0.1) - env_vars['SSLKEYLOGFILE']= './logs/keylog_%s_%s.txt' % (prefix, node_name) - - temp_dir = tempfile.TemporaryDirectory(prefix='netsim', suffix='{}_{}'.format(prefix, node_name)) + + cmd = handle_connection_strategy( + node, node_counts, i, runner_id, node_ips, node_params + ) + + temp_dir = tempfile.TemporaryDirectory( + prefix="netsim", suffix=f"{prefix}_{node_name}_{runner_id}" + ) temp_dirs.append(temp_dir) - env_vars['IROH_DATA_DIR'] = '{}'.format(temp_dir.name) - - f.write('cmd: %s\n\n' % cmd) - f.flush() - p = n.popen(cmd, stdout=f, stderr=f, shell=True, env=env_vars) - if 'process' in node and node['process'] == 'short': + env_vars = setup_env_vars(prefix, node_name, temp_dir.name, debug) + + p = execute_node_command(cmd, prefix, node_name, n, env_vars) + if "process" in node and node["process"] == "short": p_short_box.append((node_name, p)) else: p_box.append(p) - if 'param_parser' in node: - done_wait = False - if not 'wait' in node: - node['wait'] = 1 - for z in 
range(node['wait']): - if done_wait: - break - time.sleep(1) - for zz in range(int(node['count'])): - found = 0 - node_name = '%s_%d' % (node['name'], zz) - n = net.get(node_name) - f = open('logs/%s__%s.txt' % (prefix, node_name), 'r') - lines = f.readlines() - for line in lines: - if node['param_parser'] == 'iroh_ticket': - if line.startswith('All-in-one ticket'): - node_params[node_name] = line[len('All-in-one ticket: '):].strip() - found+=1 - break - f.close() - if found == int(node['count']): - done_wait = True - break - else: - if 'wait' in node: - time.sleep(int(node['wait'])) - # CLI(net) + if "param_parser" in node: + node_params.update(parse_node_params(node, prefix, node_params, runner_id)) + elif "wait" in node: + time.sleep(int(node["wait"])) - process_errors = [] - for i in range(TIMEOUT): - time.sleep(1) - if not any(p.poll() is None for (n, p) in p_short_box): - break - for (node_name, p) in p_short_box: - if integration: - r = p.poll() - if r is None: - p.terminate() - process_errors.append('Process has timed out: %s for node: %s' % (prefix, node_name)) - elif r != 0: - process_errors.append('Process has failed: %s with exit code: %d for node: %s' % (prefix, r, node_name)) - else: - p.terminate() + # CLI(net) - if process_errors: + process_errors = monitor_short_processes(p_short_box, prefix) + if process_errors and args.integration: for error in process_errors: print(error) - logs_on_error(nodes, prefix) - cleanup_tmp_dirs(temp_dirs) - raise Exception('Netsim run failed') - - for p in p_box: - p.terminate() - net.stop() - sniffer.close() + eject(nodes, prefix, runner_id, temp_dirs) + terminate_processes(p_box) cleanup_tmp_dirs(temp_dirs) + return (net, sniffer) + + +def run(case, runner_id, name, skiplist, args): + prefix = name + "__" + case["name"] + if prefix in skiplist: + print("Skipping:", prefix) + return + nodes = case["nodes"] + viz = False + if "visualize" in case: + viz = case["visualize"] & args.visualize + print('Running "%s"...' 
% prefix) + n, s = (None, None) + if not args.reports_only: + (n, s) = run_case(nodes, runner_id, prefix, args, args.debug, viz) + process_logs(nodes, prefix, runner_id) + process_integration_logs(nodes, prefix, runner_id) + if viz: + viz_args = { + "path": "logs/" + prefix + ".viz.pcap", + "keylog": "logs/keylog_" + prefix + "_iroh_srv_0.txt", + "topo": "logs/" + prefix + ".topo.json", + "output": "viz/" + prefix + ".svg", + } + run_viz(viz_args) + return (n, s) -def cleanup_tmp_dirs(temp_dirs): - for temp_dir in temp_dirs: - temp_dir.cleanup() +def run_parallel(cases, name, skiplist, args, max_workers=4): + with concurrent.futures.ThreadPoolExecutor() as executor: + chunks = [cases[i : i + max_workers] for i in range(0, len(cases), max_workers)] + for chunk in chunks: + futures = [] + r = [] + for i, case in enumerate(chunk): + futures.append(executor.submit(run, case, i, name, skiplist, args)) + for future in concurrent.futures.as_completed(futures): + try: + rx = future.result() + r.append(rx) + except Exception as e: + print("Exception:", e) + sys.exit(1) + for n, s in r: + if n: + n.stop() + if s: + s.close() -if __name__ == '__main__': - setLogLevel('info') +if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument("cfg", help = "Input config file") - parser.add_argument("-r", help = "Run only report generation", action='store_true') - parser.add_argument("--integration", help = "Run in integration test mode", action='store_true') - parser.add_argument("--sniff", help = "Run sniffer to record all traffic", action='store_false') - parser.add_argument("--skip", help = "Comma separated list of tests to skip") - parser.add_argument("--debug", help = "Enable full debug logging", action='store_true') + parser.add_argument("cfg", help="Input config file") + parser.add_argument( + "--reports-only", + help="Run only report generation", + action="store_true", + default=False, + ) + parser.add_argument( + "--integration", + help="Run in 
integration test mode", + action="store_true", + default=False, + ) + parser.add_argument( + "--sniff", + help="Run sniffer to record all traffic", + action="store_true", + default=False, + ) + parser.add_argument("--skip", help="Comma separated list of tests to skip") + parser.add_argument( + "--debug", help="Enable full debug logging", action="store_true", default=False + ) + parser.add_argument( + "--visualize", help="Enable visualization", action="store_true", default=False + ) + parser.add_argument( + "--max-workers", help="Max workers for parallel execution", type=int, default=1 + ) + parser.add_argument( + "--netsim-log-level", help="Set log level for netsim", default="error" + ) args = parser.parse_args() - skiplist = [] - if args.skip: - skiplist = args.skip.split(',') + setLogLevel(args.netsim_log_level) + + skiplist = args.skip.split(",") if args.skip else [] paths = [] - is_dir = os.path.isdir(args.cfg) - if is_dir: - for root, dirs, files in os.walk(args.cfg): - for f in files: - if f.endswith('.json'): - paths.append(os.path.join(root, f)) + if os.path.isdir(args.cfg): + paths = [ + f for f in glob.glob(os.path.join(args.cfg, "**", "*.json"), recursive=True) + ] else: paths.append(args.cfg) - for path in paths: - config_f = open(path, 'r') + print("Args:", args) + + for path in paths: + config_f = open(path, "r") config = json.load(config_f) - print('start test\n') - name = config['name'] - - for case in config['cases']: - prefix = name + '__' + case['name'] - if prefix in skiplist: - print("Skipping:", prefix) - continue - nodes = case['nodes'] - viz = False - # ignore any viz config as we really don't use it - # if 'visualize' in case: - # viz = case['visualize'] - print('running "%s"...' 
% prefix) - if not args.r: - run(nodes, prefix, args, args.debug, viz) - stats_parser(nodes, prefix) - integration_parser(nodes, prefix) - if viz: - viz_args = { - 'path': 'logs/' + prefix + '.viz.pcap', - 'keylog': 'logs/keylog_' + prefix +'_iroh_srv_0.txt', - 'topo': 'logs/' + prefix + '.topo.json', - 'output': 'viz/' + prefix + '.svg' - } - run_viz(viz_args) \ No newline at end of file + config_f.close() + name = config["name"] + print(f"Start testing: %s\n" % path) + run_parallel(config["cases"], name, skiplist, args, args.max_workers) + + print("Done") diff --git a/netsim/net/__init__.py b/netsim/net/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/netsim/net/link.py b/netsim/net/link.py new file mode 100644 index 0000000..f122b12 --- /dev/null +++ b/netsim/net/link.py @@ -0,0 +1,645 @@ +""" +Moved into the repo to allow for higher bandwidth limits and +more control over the link parameters. + +############################################################################### + +link.py: interface and link abstractions for mininet + +It seems useful to bundle functionality for interfaces into a single +class. + +Also it seems useful to enable the possibility of multiple flavors of +links, including: + +- simple veth pairs +- tunneled links +- patchable links (which can be disconnected and reconnected via a patchbay) +- link simulators (e.g. wireless) + +Basic division of labor: + + Nodes: know how to execute commands + Intfs: know how to configure themselves + Links: know how to connect nodes together + +Intf: basic interface object that can configure itself +TCIntf: interface with bandwidth limiting and delay via tc + +Link: basic link class for creating veth pairs +""" + +import re + +from mininet.log import info, error, debug +from mininet.util import makeIntfPair + +# Make pylint happy: +# pylint: disable=too-many-arguments + + +class Intf(object): + + "Basic interface object that can configure itself." 
+ + def __init__(self, name, node=None, port=None, link=None, mac=None, **params): + """name: interface name (e.g. h1-eth0) + node: owning node (where this intf most likely lives) + link: parent link if we're part of a link + other arguments are passed to config()""" + self.node = node + self.name = name + self.link = link + self.mac = mac + self.ip, self.prefixLen = None, None + + # if interface is lo, we know the ip is 127.0.0.1. + # This saves an ifconfig command per node + if self.name == "lo": + self.ip = "127.0.0.1" + self.prefixLen = 8 + # Add to node (and move ourselves if necessary ) + if node: + moveIntfFn = params.pop("moveIntfFn", None) + if moveIntfFn: + node.addIntf(self, port=port, moveIntfFn=moveIntfFn) + else: + node.addIntf(self, port=port) + # Save params for future reference + self.params = params + self.config(**params) + + def cmd(self, *args, **kwargs): + "Run a command in our owning node" + return self.node.cmd(*args, **kwargs) + + def ifconfig(self, *args): + "Configure ourselves using ifconfig" + return self.cmd("ifconfig", self.name, *args) + + def setIP(self, ipstr, prefixLen=None): + """Set our IP address""" + # This is a sign that we should perhaps rethink our prefix + # mechanism and/or the way we specify IP addresses + if "/" in ipstr: + self.ip, self.prefixLen = ipstr.split("/") + return self.ifconfig(ipstr, "up") + else: + if prefixLen is None: + raise Exception("No prefix length set for IP address %s" % (ipstr,)) + self.ip, self.prefixLen = ipstr, prefixLen + return self.ifconfig("%s/%s" % (ipstr, prefixLen)) + + def setMAC(self, macstr): + """Set the MAC address for an interface. 
+ macstr: MAC address as string""" + self.mac = macstr + return ( + self.ifconfig("down") + + self.ifconfig("hw", "ether", macstr) + + self.ifconfig("up") + ) + + _ipMatchRegex = re.compile(r"\d+\.\d+\.\d+\.\d+") + _macMatchRegex = re.compile(r"..:..:..:..:..:..") + + def updateIP(self): + "Return updated IP address based on ifconfig" + # use pexec instead of node.cmd so that we dont read + # backgrounded output from the cli. + ifconfig, _err, _exitCode = self.node.pexec("ifconfig %s" % self.name) + ips = self._ipMatchRegex.findall(ifconfig) + self.ip = ips[0] if ips else None + return self.ip + + def updateMAC(self): + "Return updated MAC address based on ifconfig" + ifconfig = self.ifconfig() + macs = self._macMatchRegex.findall(ifconfig) + self.mac = macs[0] if macs else None + return self.mac + + # Instead of updating ip and mac separately, + # use one ifconfig call to do it simultaneously. + # This saves an ifconfig command, which improves performance. + + def updateAddr(self): + "Return IP address and MAC address based on ifconfig." 
+ ifconfig = self.ifconfig() + ips = self._ipMatchRegex.findall(ifconfig) + macs = self._macMatchRegex.findall(ifconfig) + self.ip = ips[0] if ips else None + self.mac = macs[0] if macs else None + return self.ip, self.mac + + def IP(self): + "Return IP address" + return self.ip + + def MAC(self): + "Return MAC address" + return self.mac + + def isUp(self, setUp=False): + "Return whether interface is up" + if setUp: + cmdOutput = self.ifconfig("up") + # no output indicates success + if cmdOutput: + error("Error setting %s up: %s " % (self.name, cmdOutput)) + return False + else: + return True + else: + return "UP" in self.ifconfig() + + def rename(self, newname): + "Rename interface" + if self.node and self.name in self.node.nameToIntf: + # rename intf in node's nameToIntf + self.node.nameToIntf[newname] = self.node.nameToIntf.pop(self.name) + self.ifconfig("down") + result = self.cmd("ip link set", self.name, "name", newname) + self.name = newname + self.ifconfig("up") + return result + + # The reason why we configure things in this way is so + # That the parameters can be listed and documented in + # the config method. + # Dealing with subclasses and superclasses is slightly + # annoying, but at least the information is there! 
+ + def setParam(self, results, method, **param): + """Internal method: configure a *single* parameter + results: dict of results to update + method: config method name + param: arg=value (ignore if value=None) + value may also be list or dict""" + name, value = list(param.items())[0] + f = getattr(self, method, None) + if not f or value is None: + return None + if isinstance(value, list): + result = f(*value) + elif isinstance(value, dict): + result = f(**value) + else: + result = f(value) + results[name] = result + return result + + def config(self, mac=None, ip=None, ifconfig=None, up=True, **_params): + """Configure Node according to (optional) parameters: + mac: MAC address + ip: IP address + ifconfig: arbitrary interface configuration + Subclasses should override this method and call + the parent class's config(**params)""" + # If we were overriding this method, we would call + # the superclass config method here as follows: + # r = Parent.config( **params ) + r = {} + self.setParam(r, "setMAC", mac=mac) + self.setParam(r, "setIP", ip=ip) + self.setParam(r, "isUp", up=up) + self.setParam(r, "ifconfig", ifconfig=ifconfig) + return r + + def delete(self): + "Delete interface" + self.cmd("ip link del " + self.name) + # We used to do this, but it slows us down: + # if self.node.inNamespace: + # Link may have been dumped into root NS + # quietRun( 'ip link del ' + self.name ) + self.node.delIntf(self) + self.link = None + + def status(self): + "Return intf status as a string" + links, _err, _result = self.node.pexec("ip link show") + if self.name in links: + return "OK" + else: + return "MISSING" + + def __repr__(self): + return "<%s %s>" % (self.__class__.__name__, self.name) + + def __str__(self): + return self.name + + +class TCIntf(Intf): + """Interface customized by tc (traffic control) utility + Allows specification of bandwidth limits (various methods) + as well as delay, loss and max queue length""" + + # The parameters we use seem to work reasonably up to 
1 Gb/sec + # For higher data rates, we will probably need to change them. + bwParamMax = 100000000 + + def bwCmds( + self, + bw=None, + speedup=0, + use_hfsc=False, + use_tbf=False, + latency_ms=None, + enable_ecn=False, + enable_red=False, + ): + "Return tc commands to set bandwidth" + + cmds, parent = [], " root " + + if bw and (bw < 0 or bw > self.bwParamMax): + error( + "Bandwidth limit", + bw, + "is outside supported range 0..%d" % self.bwParamMax, + "- ignoring\n", + ) + elif bw is not None: + # BL: this seems a bit brittle... + if speedup > 0 and self.node.name[0:1] == "s": + bw = speedup + # This may not be correct - we should look more closely + # at the semantics of burst (and cburst) to make sure we + # are specifying the correct sizes. For now I have used + # the same settings we had in the mininet-hifi code. + if use_hfsc: + cmds += [ + "%s qdisc add dev %s root handle 5:0 hfsc default 1", + "%s class add dev %s parent 5:0 classid 5:1 hfsc sc " + + "rate %fMbit ul rate %fMbit" % (bw, bw), + ] + elif use_tbf: + if latency_ms is None: + latency_ms = 15.0 * 8 / bw + cmds += [ + "%s qdisc add dev %s root handle 5: tbf " + + "rate %fMbit burst 15000 latency %fms" % (bw, latency_ms) + ] + else: + cmds += [ + "%s qdisc add dev %s root handle 5:0 htb default 1", + "%s class add dev %s parent 5:0 classid 5:1 htb " + + "rate %fMbit burst 15k" % bw, + ] + parent = " parent 5:1 " + + # ECN or RED + if enable_ecn: + cmds += [ + "%s qdisc add dev %s" + + parent + + "handle 6: red limit 1000000 " + + "min 30000 max 35000 avpkt 1500 " + + "burst 20 " + + "bandwidth %fmbit probability 1 ecn" % bw + ] + parent = " parent 6: " + elif enable_red: + cmds += [ + "%s qdisc add dev %s" + + parent + + "handle 6: red limit 1000000 " + + "min 30000 max 35000 avpkt 1500 " + + "burst 20 " + + "bandwidth %fmbit probability 1" % bw + ] + parent = " parent 6: " + return cmds, parent + + @staticmethod + def delayCmds(parent, delay=None, jitter=None, loss=None, max_queue_size=None): + 
"Internal method: return tc commands for delay and loss" + cmds = [] + if loss and (loss < 0 or loss > 100): + error("Bad loss percentage", loss, "%%\n") + else: + # Delay/jitter/loss/max queue size + netemargs = "%s%s%s%s" % ( + "delay %s " % delay if delay is not None else "", + "%s " % jitter if jitter is not None else "", + "loss %.5f " % loss if (loss is not None and loss > 0) else "", + "limit %d" % max_queue_size if max_queue_size is not None else "", + ) + if netemargs: + cmds = [ + "%s qdisc add dev %s " + parent + " handle 10: netem " + netemargs + ] + parent = " parent 10:1 " + return cmds, parent + + def tc(self, cmd, tc="tc"): + "Execute tc command for our interface" + c = cmd % (tc, self) # Add in tc command and our name + debug(" *** executing command: %s\n" % c) + return self.cmd(c) + + # pylint: disable=arguments-differ + def config( + self, + bw=None, + delay=None, + jitter=None, + loss=None, + gro=False, + txo=True, + rxo=True, + speedup=0, + use_hfsc=False, + use_tbf=False, + latency_ms=None, + enable_ecn=False, + enable_red=False, + max_queue_size=None, + **params + ): + """Configure the port and set its properties. + bw: bandwidth in b/s (e.g. '10m') + delay: transmit delay (e.g. '1ms' ) + jitter: jitter (e.g. '1ms') + loss: loss (e.g. 
'1%' ) + gro: enable GRO (False) + txo: enable transmit checksum offload (True) + rxo: enable receive checksum offload (True) + speedup: experimental switch-side bw option + use_hfsc: use HFSC scheduling + use_tbf: use TBF scheduling + latency_ms: TBF latency parameter + enable_ecn: enable ECN (False) + enable_red: enable RED (False) + max_queue_size: queue limit parameter for netem""" + + # Support old names for parameters + gro = not params.pop("disable_gro", not gro) + + result = Intf.config(self, **params) + + def on(isOn): + "Helper method: bool -> 'on'/'off'" + return "on" if isOn else "off" + + # Set offload parameters with ethool + self.cmd("ethtool -K", self, "gro", on(gro), "tx", on(txo), "rx", on(rxo)) + + # Optimization: return if nothing else to configure + # Question: what happens if we want to reset things? + if bw is None and not delay and not loss and max_queue_size is None: + return None + + # Clear existing configuration + tcoutput = self.tc("%s qdisc show dev %s") + if "priomap" not in tcoutput and "noqueue" not in tcoutput: + cmds = ["%s qdisc del dev %s root"] + else: + cmds = [] + + # Bandwidth limits via various methods + bwcmds, parent = self.bwCmds( + bw=bw, + speedup=speedup, + use_hfsc=use_hfsc, + use_tbf=use_tbf, + latency_ms=latency_ms, + enable_ecn=enable_ecn, + enable_red=enable_red, + ) + cmds += bwcmds + + # Delay/jitter/loss/max_queue_size using netem + delaycmds, parent = self.delayCmds( + delay=delay, + jitter=jitter, + loss=loss, + max_queue_size=max_queue_size, + parent=parent, + ) + cmds += delaycmds + + # Ugly but functional: display configuration info + stuff = ( + (["%.2fMbit" % bw] if bw is not None else []) + + (["%s delay" % delay] if delay is not None else []) + + (["%s jitter" % jitter] if jitter is not None else []) + + (["%.5f%% loss" % loss] if loss is not None else []) + + (["ECN"] if enable_ecn else ["RED"] if enable_red else []) + ) + info("(" + " ".join(stuff) + ") ") + + # Execute all the commands in our node 
+ debug("at map stage w/cmds: %s\n" % cmds) + tcoutputs = [self.tc(cmd) for cmd in cmds] + for output in tcoutputs: + if output != "": + error("*** Error: %s" % output) + debug("cmds:", cmds, "\n") + debug("outputs:", tcoutputs, "\n") + result["tcoutputs"] = tcoutputs + result["parent"] = parent + + return result + + +class Link(object): + """A basic link is just a veth pair. + Other types of links could be tunnels, link emulators, etc..""" + + # pylint: disable=too-many-branches + def __init__( + self, + node1, + node2, + port1=None, + port2=None, + intfName1=None, + intfName2=None, + addr1=None, + addr2=None, + intf=Intf, + cls1=None, + cls2=None, + params1=None, + params2=None, + fast=True, + **params + ): + """Create veth link to another node, making two new interfaces. + node1: first node + node2: second node + port1: node1 port number (optional) + port2: node2 port number (optional) + intf: default interface class/constructor + cls1, cls2: optional interface-specific constructors + intfName1: node1 interface name (optional) + intfName2: node2 interface name (optional) + params1: parameters for interface 1 (optional) + params2: parameters for interface 2 (optional) + **params: additional parameters for both interfaces""" + + # This is a bit awkward; it seems that having everything in + # params is more orthogonal, but being able to specify + # in-line arguments is more convenient! So we support both. 
+ params1 = dict(params1) if params1 else {} + params2 = dict(params2) if params2 else {} + if port1 is not None: + params1["port"] = port1 + if port2 is not None: + params2["port"] = port2 + if "port" not in params1: + params1["port"] = node1.newPort() + if "port" not in params2: + params2["port"] = node2.newPort() + if not intfName1: + intfName1 = self.intfName(node1, params1["port"]) + if not intfName2: + intfName2 = self.intfName(node2, params2["port"]) + + # Update with remaining parameter list + params1.update(params) + params2.update(params) + + self.fast = fast + if fast: + params1.setdefault("moveIntfFn", self._ignore) + params2.setdefault("moveIntfFn", self._ignore) + self.makeIntfPair( + intfName1, intfName2, addr1, addr2, node1, node2, deleteIntfs=False + ) + else: + self.makeIntfPair(intfName1, intfName2, addr1, addr2) + + if not cls1: + cls1 = intf + if not cls2: + cls2 = intf + + intf1 = cls1(name=intfName1, node=node1, link=self, mac=addr1, **params1) + intf2 = cls2(name=intfName2, node=node2, link=self, mac=addr2, **params2) + + # All we are is dust in the wind, and our two interfaces + self.intf1, self.intf2 = intf1, intf2 + + # pylint: enable=too-many-branches + + @staticmethod + def _ignore(*args, **kwargs): + "Ignore any arguments" + pass + + def intfName(self, node, n): + "Construct a canonical interface name node-ethN for interface n." 
+ # Leave this as an instance method for now + assert self + return node.name + "-e" + repr(n) + + @classmethod + def makeIntfPair( + cls, + intfname1, + intfname2, + addr1=None, + addr2=None, + node1=None, + node2=None, + deleteIntfs=True, + ): + """Create pair of interfaces + intfname1: name for interface 1 + intfname2: name for interface 2 + addr1: MAC address for interface 1 (optional) + addr2: MAC address for interface 2 (optional) + node1: home node for interface 1 (optional) + node2: home node for interface 2 (optional) + (override this method [and possibly delete()] + to change link type)""" + # Leave this as a class method for now + assert cls + return makeIntfPair( + intfname1, intfname2, addr1, addr2, node1, node2, deleteIntfs=deleteIntfs + ) + + def delete(self): + "Delete this link" + self.intf1.delete() + self.intf1 = None + self.intf2.delete() + self.intf2 = None + + def stop(self): + "Override to stop and clean up link as needed" + self.delete() + + def status(self): + "Return link status as a string" + return "(%s %s)" % (self.intf1.status(), self.intf2.status()) + + def __str__(self): + return "%s<->%s" % (self.intf1, self.intf2) + + +class OVSIntf(Intf): + "Patch interface on an OVSSwitch" + + def ifconfig(self, *args): + cmd = " ".join(args) + if cmd == "up": + # OVSIntf is always up + return + else: + raise Exception("OVSIntf cannot do ifconfig " + cmd) + + +class OVSLink(Link): + """Link that makes patch links between OVSSwitches + Warning: in testing we have found that no more + than ~64 OVS patch links should be used in row.""" + + def __init__(self, node1, node2, **kwargs): + "See Link.__init__() for options" + try: + OVSSwitch + except NameError: + # pylint: disable=import-outside-toplevel,cyclic-import + from mininet.node import OVSSwitch + self.isPatchLink = False + if isinstance(node1, OVSSwitch) and isinstance(node2, OVSSwitch): + self.isPatchLink = True + kwargs.update(cls1=OVSIntf, cls2=OVSIntf) + Link.__init__(self, node1, node2, 
**kwargs) + + # pylint: disable=arguments-differ, signature-differs + def makeIntfPair(self, *args, **kwargs): + "Usually delegated to OVSSwitch" + if self.isPatchLink: + return None, None + else: + return Link.makeIntfPair(*args, **kwargs) + + +class TCLink(Link): + "Link with TC interfaces" + + def __init__(self, *args, **kwargs): + kwargs.setdefault("cls1", TCIntf) + kwargs.setdefault("cls2", TCIntf) + Link.__init__(self, *args, **kwargs) + + +class TCULink(TCLink): + """TCLink with default settings optimized for UserSwitch + (txo=rxo=0/False). Unfortunately with recent Linux kernels, + enabling TX and RX checksum offload on veth pairs doesn't work + well with UserSwitch: either it gets terrible performance or + TCP packets with bad checksums are generated, forwarded, and + *dropped* due to having bad checksums! OVS and LinuxBridge seem + to cope with this somehow, but it is likely to be an issue with + many software Ethernet bridges.""" + + def __init__(self, *args, **kwargs): + kwargs.update(txo=False, rxo=False) + TCLink.__init__(self, *args, **kwargs) diff --git a/netsim/net/network.py b/netsim/net/network.py new file mode 100644 index 0000000..81b1065 --- /dev/null +++ b/netsim/net/network.py @@ -0,0 +1,166 @@ +from mininet.topo import Topo +from mininet.nodelib import NAT +from mininet.node import Node + + +class StarTopo(Topo): + """Single switch connected to n hosts. 
+ Default network layout: + h0 h1 + \ / + s1 - hN...""" + + def build( + self, + nodes=[ + {"name": "hx", "count": 1, "type": "public"}, + {"name": "h", "count": 1, "type": "public"}, + ], + runner_id=0, + interconnect="s1", + ): + + self.runner_id = runner_id + routerName = "r0_" + str(runner_id) + defaultIP = "10.0.0.1/8" # IP address for r0-eth1 + router = self.addNode(routerName, cls=LinuxRouter, ip="10.1.1.1") + interconnect = self.addSwitch(interconnect + "-r" + str(runner_id)) + + self.addLink( + interconnect, + router, + intfName2=routerName + "-eth1", + params2={"ip": defaultIP}, + ) + + kk = 0 + for node in nodes: + if node["type"] == "public": + for i in range(int(node["count"])): + h = self.addHost( + "%s_%d_r%d" % (node["name"], i, runner_id), + cls=EdgeNode, + defaultRoute="via 10.1.1.1", + ) + if "link" in node: + loss = node["link"]["loss"] + latency = node["link"]["latency"] + bw = node["link"]["bw"] + self.addLink(interconnect, h, loss=loss, delay=latency, bw=bw) + else: + self.addLink(interconnect, h) + + if node["type"] == "nat": + kk += 1 + for i in range(int(node["count"])): + inetIntf = "n_%s%dr%d-e0" % (node["name"], i, runner_id) + localIntf = "n_%s%dr%d-e1" % (node["name"], i, runner_id) + localIP = "192.168.%d.1" % i + localSubnet = "192.168.%d.0/24" % i + natParams = {"ip": "%s/24" % localIP} + # add NAT to topology + nat = self.addNode( + "n_%s%dr%d" % (node["name"], i, runner_id), + cls=NAT, + subnet=localSubnet, + inetIntf=inetIntf, + localIntf=localIntf, + ) + switch = self.addSwitch( + "natsw%s%dr%d" % (node["name"][:2], i, runner_id) + ) + # connect NAT to inet and local switches + self.addLink(nat, interconnect, intfName1=inetIntf) + self.addLink(nat, switch, intfName1=localIntf, params1=natParams) + # add host and connect to local switch + host = self.addHost( + "%s_%d_r%d" % (node["name"], i, runner_id), + ip="192.168.%d.10%d/24" % (i, kk), + defaultRoute="via %s" % localIP, + ) + if "link" in node: + loss = node["link"]["loss"] 
+ latency = node["link"]["latency"] + bw = node["link"]["bw"] + self.addLink(host, switch, loss=loss, delay=latency, bw=bw) + else: + self.addLink(host, switch) + + box = self.addHost( + "zbox1-r" + str(runner_id), + cls=EdgeNode, + ip="10.1.1.2", + defaultRoute="via 10.1.1.1", + ) # creates a dedicated node to play around + self.addLink(interconnect, box) + + +"A Node with multicast stuff." + + +class EdgeNode(Node): + def config(self, **params): + super(EdgeNode, self).config(**params) + intfName = self.intfNames()[0] + self.cmd("sysctl net.ipv4.icmp_echo_ignore_broadcasts=0") + self.cmd("route add -net 224.0.0.0 netmask 240.0.0.0 dev " + intfName) + self.cmd("smcrouted -l debug -I smcroute-" + self.name) + self.cmd("sleep 1") + self.cmd( + "smcroutectl -I smcroute-" + self.name + " join " + intfName + " 239.0.0.1" + ) + + def terminate(self): + self.cmd("smcroutectl -I smcroute-" + self.name + " kill") + super(EdgeNode, self).terminate() + + +"A Node with IP forwarding enabled." + + +class LinuxRouter(Node): + def config(self, **params): + super(LinuxRouter, self).config(**params) + # Enable forwarding on the router + self.cmd("sysctl net.ipv4.ip_forward=1") + self.cmd("sysctl net.ipv4.icmp_echo_ignore_broadcasts=0") + self.cmd("sysctl net.ipv4.conf." 
+ self.name + "-eth1.force_igmp_version=2") + self.cmd("smcrouted -l debug -I smcroute-" + self.name) + self.cmd("sleep 1") + self.cmd( + "smcroutectl -I smcroute-" + + self.name + + " add " + + self.name + + "-eth1 239.0.0.1 " + + self.name + + "-eth2 " + + self.name + + "-eth3" + ) + + def terminate(self): + self.cmd("sysctl net.ipv4.ip_forward=0") + self.cmd("smcroutectl -I smcroute-" + self.name + " kill") + super(LinuxRouter, self).terminate() + + +def portForward(net, id, dport): + nat = net.get("nat%d" % id) + h = net.get("h%d" % id) + destIP = h.IP() + dest = str(destIP) + ":" + str(dport) + fport = dport + intf = "nat%d-eth0" % id + nat.cmd( + "iptables -A PREROUTING", + "-t nat -i", + intf, + "-p tcp --dport", + fport, + "-j DNAT --to", + dest, + ) + nat.cmd( + "iptables -A FORWARD", "-p tcp", "-d", destIP, "--dport", dport, "-j ACCEPT" + ) diff --git a/netsim/netsim_parser.py b/netsim/netsim_parser.py deleted file mode 100644 index 3952af8..0000000 --- a/netsim/netsim_parser.py +++ /dev/null @@ -1,233 +0,0 @@ -import json -import os -import humanfriendly - -invalid_results = { - 'data_len': 0, - 'elapsed': 0, - 'mbits': -1.0, - 'reported_mbits': 0, - 'reported_time': 0, - } - -def parse_time_output(lines, size): - for line in lines: - if line.startswith('real'): - k = line[5:].strip() - k = k.split('m') - mins = int(k[0]) - sec = float(k[1][:-1]) - d = mins * 60 + sec - s = { - 'data_len': size, - 'elapsed': d, - 'mbits': float(size * 8) / (d * 1000 * 1000), - 'reported_mbits': 0, - 'reported_time': 0, - } - return s - return invalid_results - -def parse_humanized_output(line): - p = line.split(', ')[-1] - v_bytes = humanfriendly.parse_size(p, binary=True) - v_mbits = float(v_bytes*8) / (1024*1024) - return v_mbits - -def parse_iperf_udp_server(lines): - s = [] - collect = False - for line in lines: - if line.startswith('[ ID]'): - collect = True - continue - if collect: - if 'datagram' in line: - continue - k = line.split(' ') - k = [x for x in k if x 
!= ''] - p = k.index('sec') - transfer = float(k[p+1]) - if k[p+2] == 'GBytes': - transfer *= 1024 * 1024 * 1024 - if k[p+2] == 'MBytes': - transfer *= 1024 * 1024 - if k[p+2] == 'KBytes': - transfer *= 1024 - throughput = float(k[p+3]) - if k[p+4].strip() == 'Gbits/sec': - throughput *= 1000 - stat = {} - stat['data_len'] = transfer - stat['elapsed'] = float(10.0) - stat['mbits'] = throughput - s.append(stat) - return s - -def parse_iroh_client(lines): - s = {} - for line in lines: - if line.startswith('Stats'): - k = line.replace('Stats ', '') - k = k.replace('data_len', '"data_len"') - k = k.replace('elapsed', '"elapsed"') - k = k.replace('s,', ',') - k = k.replace('mbits', '"mbits"') - d = json.loads(k) - s['data_len'] = int(d['data_len']) - s['elapsed'] = float(d['elapsed']) - s['mbits'] = float(d['mbits']) - if not 'data_len' in s: - return invalid_results - return s - -def parse_iperf_server(lines): - s = [] - collect = False - for line in lines: - if line.startswith('[ ID]'): - collect = True - continue - if collect: - k = line.split(' ') - k = [x for x in k if x != ''] - p = k.index('sec') - transfer = float(k[p+1]) - if k[p+2] == 'GBytes': - transfer *= 1024 * 1024 * 1024 - if k[p+2] == 'MBytes': - transfer *= 1024 * 1024 - if k[p+2] == 'KBytes': - transfer *= 1024 - throughput = float(k[p+3]) - if k[p+4].strip() == 'Gbits/sec': - throughput *= 1000 - stat = {} - stat['data_len'] = transfer - stat['elapsed'] = float(10.0) - stat['mbits'] = throughput - s.append(stat) - return s - -def aggregate_stats(stats): - summed = {} - for s in stats: - for k, v in s.items(): - if k in summed: - summed[k] += v - else: - summed[k] = v - avrged = {} - for k, v in summed.items(): - avrged[k] = float(v) / len(stats) - return (summed, avrged) - -def stats_parser(nodes, prefix): - files = [] - valid_parsers = ['iroh_client', 'iperf_server', 'iperf_udp_server', 'time_1gb', 'iroh_1gb', 'iroh_cust_'] - for root, dirs, fs in os.walk('logs'): - for f in fs: - if 
f.startswith(prefix + '__'): - files.append(os.path.join(root,f)) - for node in nodes: - if 'parser' in node: - stats = [] - try: - if any(node['parser'].startswith(prefix) for prefix in valid_parsers): - for i in range(int(node['count'])): - log_path = 'logs/%s__%s_%d.txt' %(prefix, node['name'], i) - f = open(log_path, 'r') - lines = f.readlines() - if node['parser'] == 'iroh_client': - s = parse_iroh_client(lines) - stats.append(s) - if node['parser'] == 'iperf_server': - s = parse_iperf_server(lines) - stats.extend(s) - if node['parser'] == 'iperf_udp_server': - s = parse_iperf_udp_server(lines) - stats.extend(s) - if node['parser'] == 'time_1gb': - s = parse_time_output(lines, 1024*1024*1024) - stats.append(s) - if node['parser'] in ['iroh_1gb', 'iroh_1mb'] or node['parser'].startswith('iroh_cust_'): - is_ok = 0 - reported = 0 - reported_time = 0 - f_size = 1024*1024*1024 - if node['parser'] == 'iroh_1mb': - f_size = 1024*1024 - if node['parser'].startswith('iroh_cust_'): - f_size_str = node['parser'].split('_')[-1] - f_size = humanfriendly.parse_size(f_size_str, binary=True) - for line in lines: - if 'Transferred' in line and 'in' in line and '/s' in line: - is_ok += 1 - reported = parse_humanized_output(line) - reported_time = (f_size*8) / (reported*1000*1000) - if is_ok == 0: - raise Exception("bad run") - s = parse_time_output(lines, f_size) - s['reported_mbits'] = reported - s['reported_time'] = reported_time - stats.append(s) - except: - stats = [{ - 'data_len': 0, - 'elapsed': 0, - 'mbits': -1.0 - }] - (sum_stats, avg_stats) = aggregate_stats(stats) - report = { - 'raw': stats, - 'sum': sum_stats, - 'avg': avg_stats - } - report_json = json.dumps(report, indent=4) - f = open("report/%s__%s.json" % (prefix, node['name']), "w") - f.write(report_json) - f.close() - -def parse_magic_iroh_client(lines): - s = { - 'conn_upgrade': 'false', - 'transfer_success': 'false', - } - is_ok = 0 - for line in lines: - if 'Transferred' in line and 'in' in line and '/s' 
in line: - is_ok += 1 - if 'found send address' in line: - s['conn_upgrade'] = 'true' - s['transfer_success'] = 'true' if is_ok == 1 else 'false' - return s - -def integration_parser(nodes, prefix): - files = [] - valid_parsers = ['magic_iroh_client'] - for root, dirs, fs in os.walk('logs'): - for f in fs: - if f.startswith(prefix + '__'): - files.append(os.path.join(root,f)) - for node in nodes: - if 'integration' in node: - stats = [] - try: - if node['integration'] in valid_parsers: - for i in range(int(node['count'])): - log_path = 'logs/%s__%s_%d.txt' % (prefix, node['name'], i) - f = open(log_path, 'r') - lines = f.readlines() - if node['integration'] == 'magic_iroh_client': - s = parse_magic_iroh_client(lines) - s['node'] = '%s__%s_%d' % (prefix, node['name'], i) - print(s) - stats.append(s) - except: - print("Integration error") - stats = [] - - report_json = json.dumps(stats, indent=4) - f = open("report/integration_%s__%s.json" % (prefix, node['name']), "w") - f.write(report_json) - f.close() diff --git a/netsim/network.py b/netsim/network.py deleted file mode 100644 index 27912ca..0000000 --- a/netsim/network.py +++ /dev/null @@ -1,63 +0,0 @@ -from mininet.topo import Topo -from mininet.nodelib import NAT - -""" -Default network layout: - h0 h1 - \ / - s1 - hN... -""" -class StarTopo(Topo): - "Single switch connected to n hosts." 
- def build(self, nodes=[{'name': 'hx', 'count': 1, 'type': 'public'}, {'name': 'h', 'count': 1, 'type': 'public'}], interconnect='s1'): - interconnect = self.addSwitch(interconnect) - kk = 0 - for node in nodes: - if node['type'] == 'public': - for i in range(int(node['count'])): - h = self.addHost('%s_%d' % (node['name'], i)) - if 'link' in node: - loss = node['link']['loss'] - latency = node['link']['latency'] - bw = node['link']['bw'] - self.addLink(interconnect, h, loss=loss, delay=latency, bw=bw) - else: - self.addLink(interconnect, h) - - if node['type'] == 'nat': - kk+=1 - for i in range(int(node['count'])): - inetIntf = 'n_%s%d-e0' % (node['name'], i) - localIntf = 'n_%s%d-e1' % (node['name'], i) - localIP = '192.168.%d.1' % i - localSubnet = '192.168.%d.0/24' % i - natParams = { 'ip' : '%s/24' % localIP } - # add NAT to topology - nat = self.addNode('n_%s%d' % (node['name'], i), cls=NAT, subnet=localSubnet, - inetIntf=inetIntf, localIntf=localIntf) - switch = self.addSwitch('natsw%s%d' % (node['name'][:2], i)) - # connect NAT to inet and local switches - self.addLink(nat, interconnect, intfName1=inetIntf) - self.addLink(nat, switch, intfName1=localIntf, params1=natParams) - # add host and connect to local switch - host = self.addHost('%s_%d' % (node['name'], i), - ip='192.168.%d.10%d/24' % (i, kk), - defaultRoute='via %s' % localIP) - if 'link' in node: - loss = node['link']['loss'] - latency = node['link']['latency'] - bw = node['link']['bw'] - self.addLink(host, switch, loss=loss, delay=latency, bw=bw) - else: - self.addLink(host, switch) - - -def portForward(net, id, dport): - nat = net.get('nat%d' % id) - h = net.get('h%d' % id) - destIP = h.IP() - dest = str(destIP) + ':' + str(dport) - fport = dport - intf = 'nat%d-eth0' % id - nat.cmd( 'iptables -A PREROUTING', '-t nat -i', intf, '-p tcp --dport', fport, '-j DNAT --to', dest ) - nat.cmd( 'iptables -A FORWARD', '-p tcp', '-d', destIP, '--dport', dport, '-j ACCEPT' ) \ No newline at end of file diff 
--git a/netsim/parsing/__init__.py b/netsim/parsing/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/netsim/parsing/netsim.py b/netsim/parsing/netsim.py new file mode 100644 index 0000000..72ce88c --- /dev/null +++ b/netsim/parsing/netsim.py @@ -0,0 +1,172 @@ +import json +import os +import humanfriendly + +invalid_results = { + "data_len": 0, + "elapsed": 0, + "mbits": -1.0, + "reported_mbits": 0, + "reported_time": 0, +} + + +def parse_time_output(lines, size): + """Parse time output and calculate throughput.""" + for line in lines: + if line.startswith("real"): + k = line[5:].strip() + k = k.split("m") + mins = int(k[0]) + sec = float(k[1][:-1]) + elapsed = mins * 60 + sec + return { + "data_len": size, + "elapsed": elapsed, + "mbits": size * 8 / (elapsed * 1e6), + "reported_mbits": 0, + "reported_time": 0, + } + return invalid_results + + +def parse_iroh_output(lines, size): + """Parse iroh output and calculate throughput.""" + transfer_lines = [ + line + for line in lines + if "Transferred" in line and "in" in line and "/s" in line + ] + if not transfer_lines: + raise Exception("bad run") + + reported = parse_humanized_output(transfer_lines[-1]) + reported_time = (size * 8) / (reported * 1_000_000) + + s = parse_time_output(lines, size) + s["reported_mbits"] = reported + s["reported_time"] = reported_time + return s + + +def parse_humanized_output(line): + """Convert human-readable size to megabits.""" + bytes_size = humanfriendly.parse_size(line.split(", ")[-1], binary=True) + return bytes_size * 8 / 1e6 + + +def parse_iperf(lines): + """Generic parser for iperf server output.""" + stats = [] + collect = False + for line in lines: + if line.startswith("[ ID]"): + collect = True + continue + if collect and "datagram" not in line: + parts = [x for x in line.split() if x] + p = parts.index("sec") + transfer = float(parts[p + 1]) * { + "KBytes": 1024, + "MBytes": 1024**2, + "GBytes": 1024**3, + }.get(parts[p + 2], 1) + throughput = 
float(parts[p + 3]) * ( + 1000 if "Gbits/sec" == parts[p + 4].strip() else 1 + ) + stats.append({"data_len": transfer, "elapsed": 10.0, "mbits": throughput}) + return stats + + +def parse_magic_iroh_client(lines): + """Parse magic iroh client integration logs.""" + s = {"conn_upgrade": "false", "transfer_success": "false"} + s["transfer_success"] = ( + "true" + if any( + "Transferred" in line and "in" in line and "/s" in line for line in lines + ) + else "false" + ) + s["conn_upgrade"] = ( + "true" if any("found send address" in line for line in lines) else "false" + ) + return s + + +def aggregate_stats(stats): + """Aggregate and average stats.""" + summed = {k: sum(d[k] for d in stats) for k in stats[0]} + avg = {k: summed[k] / len(stats) for k in summed} + return summed, avg + + +def write_report(prefix, name, stats): + """Write stats to a report file.""" + summed, avg = aggregate_stats(stats) + report = {"raw": stats, "sum": summed, "avg": avg} + with open(f"report/{prefix}__{name}.json", "w") as f: + json.dump(report, f, indent=4) + + +def process_logs(nodes, prefix, runner_id): + """Process logs based on provided nodes and parsers.""" + valid_parsers = { + "iperf_server": parse_iperf, + "iperf_udp_server": parse_iperf, + "time_1gb": lambda lines: [parse_time_output(lines, 1024 * 1024 * 1024)], + "iroh_1gb": lambda lines: [parse_iroh_output(lines, 1024 * 1024 * 1024)], + "iroh_1mb": lambda lines: [parse_iroh_output(lines, 1024 * 1024)], + "iroh_cust_": lambda lines, size: [parse_iroh_output(lines, size)], + } + for node in nodes: + if "parser" in node and ( + node["parser"] in valid_parsers or node["parser"].startswith("iroh_cust_") + ): + stats = [] + for i in range(int(node["count"])): + log_path = f'logs/{prefix}__{node["name"]}_{i}_r{runner_id}.txt' + try: + with open(log_path, "r") as f: + lines = f.readlines() + if node["parser"].startswith("iroh_cust_"): + size = humanfriendly.parse_size( + node["parser"].split("_")[-1], binary=True + ) + parser_func = 
valid_parsers["iroh_cust_"] + stats.extend(parser_func(lines, size)) + else: + parser_func = valid_parsers[node["parser"]] + stats.extend(parser_func(lines)) + except Exception as e: + print(f"Error processing {log_path}: {e}") + stats = [invalid_results] + write_report(prefix, node["name"], stats) + + +def process_integration_logs(nodes, prefix, runner_id): + """Process integration logs based on nodes and valid parsers.""" + valid_parsers = { + "magic_iroh_client": parse_magic_iroh_client, + } + for node in nodes: + if "integration" in node and node["integration"] in valid_parsers: + stats = [] + for i in range(int(node["count"])): + log_path = f'logs/{prefix}__{node["name"]}_{i}_r{runner_id}.txt' + try: + with open(log_path, "r") as f: + lines = f.readlines() + parser_func = valid_parsers[node["integration"]] + s = parser_func(lines) + s["node"] = f"{prefix}__{node['name']}_{i}" + stats.append(s) + except Exception: + stats = [] + write_integration_report(prefix, node["name"], stats) + + +def write_integration_report(prefix, name, stats): + """Write integration report to file.""" + with open(f"report/integration_{prefix}__{name}.json", "w") as f: + json.dump(stats, f, indent=4) diff --git a/netsim/parsing/reports.py b/netsim/parsing/reports.py new file mode 100644 index 0000000..ae5167c --- /dev/null +++ b/netsim/parsing/reports.py @@ -0,0 +1,132 @@ +import json +import time + + +def format_labels(commit, name, case, commit_label=True): + labels = f'name="{name}",case="{case}"' + if commit_label and commit: + labels += f',commit="{commit}"' + return labels + + +def print_metric(name, labels, value): + print(f"{name}{{{labels}}} {value:.4f}") + + +def res_to_prom(res, commit): + for test_name, cases in res.items(): + for case, metrics in cases.items(): + labels = format_labels(commit, test_name, case) + print_metric("throughput", labels, metrics["throughput"]) + print_metric("reported_throughput", labels, metrics["reported_throughput"]) + + +case_order = [ + 
"1_to_1", + "1_to_3", + "1_to_5", + "1_to_10", + "2_to_2", + "2_to_4", + "2_to_6", + "2_to_10", +] + + +def case_sort_key(case): + if case in case_order: + return case_order.index(case) + else: + case_order.append(case) + return len(case_order) - 1 + + +def res_to_table(res): + print("| test | case | throughput_gbps | throughput_transfer |") + print("| ---- | ---- | --------------- | ------------------- |") + for test_name, cases in res.items(): + sorted_cases = sorted(cases.items(), key=lambda x: case_sort_key(x[0])) + for case, metrics in sorted_cases: + print( + f'| {test_name} | {case} | {metrics["throughput"]:.2f} | {metrics["reported_throughput"]:.2f} |' + ) + + +def create_metric(commit, bucket, name, tag, value, timestamp): + return { + "commitish": commit[:7], + "bucket": bucket, + "name": name, + "tag": tag, + "value": value, + "timestamp": timestamp, + } + + +def res_to_metro(res, commit, integration): + r = {"metrics": []} + now = int(time.time()) + prefix = "integration" if integration else "iroh" + bucket = "integration" if integration else "netsim" + + for test_name, cases in res.items(): + if not test_name.startswith(prefix): + continue + + suffix = "_".join( + test_name.split("_")[2:] if integration else test_name.split("_")[1:] + ) + suffix = f".{suffix}" if suffix else "" + + for case, metrics in cases.items(): + name = ( + "_".join(test_name.split("_")[1:]) if integration else "throughput_gbps" + ) + tag = f"{case}{suffix}" + + r["metrics"].append( + create_metric(commit, bucket, name, tag, metrics["throughput"], now) + ) + if not integration: + r["metrics"].append( + create_metric( + commit, + bucket, + "reported_throughput_gbps", + tag, + metrics["reported_throughput"], + now, + ) + ) + if suffix == "": + # Report times + r["metrics"].extend( + [ + create_metric( + commit, + bucket, + "time", + f"{case}{suffix}.total", + metrics["elapsed"], + now, + ), + create_metric( + commit, + bucket, + "time", + f"{case}{suffix}.transfer", + 
metrics["reported_time"], + now, + ), + create_metric( + commit, + bucket, + "time", + f"{case}{suffix}.setup", + metrics["elapsed"] - metrics["reported_time"], + now, + ), + ] + ) + + print(json.dumps(r, indent=4, sort_keys=True)) diff --git a/netsim/process_sniff.py b/netsim/process_sniff.py index f3d19b3..61a3e9c 100644 --- a/netsim/process_sniff.py +++ b/netsim/process_sniff.py @@ -1,409 +1,11 @@ import argparse -import pyshark -import sys -import drawsvg as draw -import math -import json +from sniffer.process import run_viz -def read_topo(path): - if path: - f = open(path) - d = json.load(f) - f.close() - return d - return None - -def load_pcap(path, keylog=None, topo_path=None): - parameters_dict = {} - if keylog: - parameters_dict = {'-o' : "ssl.keylog_file:" + keylog} - cap_json = pyshark.FileCapture(path, custom_parameters=parameters_dict) - packets = [] - str_packet_data = [] - - topo = read_topo(topo_path) - topo_nodes = [] - topo_node_ip_map = {} - if topo: - i=0 - for node in topo['nodes']: - if node['type'] == 'Host' or node['type'] == 'NAT': - topo_nodes.append(node) - topo_node_ip_map[node['ip']] = i - i+=1 - - node_list = [] - ip_set = set() - - for packet in cap_json: - if len(packet.layers) == 2: - if 'arp' in packet: # ignore ARP - continue - if 'ip' in packet: - ip_set.add(packet.ip.src) - ip_set.add(packet.ip.dst) - p = { - 'src': packet.ip.src, - 'dst': packet.ip.dst, - 'type': '', - 'ipv6': False - } - - if 'tcp' in packet: - p['type'] = 'TCP' - elif 'udp' in packet: - p['type'] = 'UDP' - - if 'quic' in packet: - p['type'] = 'QUIC' - elif 'http' in packet: - p['type'] = 'HTTP' - elif 'stun' in packet: - p['type'] = 'STUN' - elif 'icmp' in packet: - p['type'] = 'ICMP' - - packets.append(p) - zk = packet.__str__().replace('\n', '<\\n>') - str_packet_data.append(zk) - - if 'ipv6' in packet: - p = { - 'src': packet.ipv6.src, - 'dst': packet.ipv6.dst, - 'type': 'ICMPv6', - 'ipv6': True - } - - js_packet_data = "const pkt_data = [" - for i in 
range(len(str_packet_data)): - js_packet_data += '`{}`,'.format(str_packet_data[i]) - js_packet_data += "];" - - for ip in ip_set: - n = { - 'type': 'node', - 'id': ip, - 'ip': ip - } - if ip in topo_node_ip_map: - tn = topo_nodes[topo_node_ip_map[ip]] - if tn['name'].startswith('1_relay'): - n['type'] = 'relay' - n['id'] = tn['name'] - if tn['name'].startswith('iroh'): - n['type'] = 'iroh' - n['id'] = tn['name'].replace('iroh_', '') - elif tn['name'].startswith('n_'): - n['type'] = 'nat' - n['id'] = tn['name'] - - node_list.append(n) - return packets, js_packet_data, node_list - -class NetsimViz(): - - HOVER_JS = """ - function pktAnalysisOnLoad(event) { - console.log('pktAnalysisOnLoad'); - var pkts = document.getElementsByClassName('pkt'); - for (var i = 0; i < pkts.length; i++) { - pkts[i].addEventListener('mouseover', pktMouseOverEvt); - } - var up = document.getElementById('up_ttx'); - var down = document.getElementById('down_ttx'); - up.addEventListener('click', upTTX); - down.addEventListener('click', downTTX); - } - function pktMouseOverEvt(event) { - var target = event.target; - var label = target.getAttribute('data-label'); - var id = target.getAttribute('data-id'); - console.log(label); - console.log(id); - renderTTX(id, 0); - } - function upTTX(event) { - var ttx = document.getElementById('ttx'); - var id = parseInt(ttx.getAttribute('data-pid'), 10); - var offset = parseInt(ttx.getAttribute('data-offset'), 10); - var len = parseInt(ttx.getAttribute('data-len'), 10); - if (offset > 0) { - offset -= 5; - } - if (offset < 0) { - offset = 0; - } - renderTTX(id, offset); - } - function downTTX(event) { - var ttx = document.getElementById('ttx'); - var id = parseInt(ttx.getAttribute('data-pid'), 10); - var offset = parseInt(ttx.getAttribute('data-offset'), 10); - var len = parseInt(ttx.getAttribute('data-len'), 10); - if (offset < len - 48) { - offset += 5; - } - renderTTX(id, offset); - } - function renderTTX(id, offset) { - var ttx = 
document.getElementById('ttx'); - ttx.setAttribute('data-pid', id); - ttx.setAttribute('data-offset', offset); - var x_pos = ttx.getAttribute('x'); - var lines = pkt_data[id].split("<\\n>"); - console.log('LL', lines.length); - ttx.setAttribute('data-len', lines.length); - var up = document.getElementById('up_ttx'); - var down = document.getElementById('down_ttx'); - if (offset > 0) { - console.log('show up'); - up.setAttribute('visibility', 'visible'); - } else { - up.setAttribute('visibility', 'hidden'); - } - if (lines.length - offset > 48) { - console.log('show down'); - down.setAttribute('visibility', 'visible'); - } else { - down.setAttribute('visibility', 'hidden'); - } - var ih = ''; - for (var i = offset; i < lines.length && i < 48 + offset; i++) { - ih += '' + lines[i].replace('\\t', ' ') + ''; - } - ttx.innerHTML = ih; - } - """ - - def __init__(self): - # TODO make configurable - self.size_x = 1600 - self.size_y = 800 - self.offset_x = 160 - self.batch_time = 1.25 - self.batch_interval = 1 - self.duration = 0 - self.node_size = 60 - self.R = 300 - self.show_node_labels = True - self.pkt_size = 12 - - self.node_color_map = { - 'iroh': '#7c7cff', - 'relay': '#ff7c7c', - 'node': '#7cff7c', - 'nat': '#ff7cff', - } - - self.pkt_color_map = { - 'ICMP': '#8ecae6', - 'ICMPv6': '#219EBC', - 'TCP': '#2a9d8f', - 'UDP': '#ffd60a', - 'STUN': '#035781', - 'HTTP': '#FB8500', - 'QUIC': '#e63946', - } - - def run(self, args): - packets, js_packet_data, node_list = load_pcap(args['path'], args['keylog'], args['topo']) - - self.duration = self.batch_time + self.batch_interval * len(packets) + 1 - - self.d = draw.Drawing(self.size_x, self.size_y, origin='center', - animation_config=draw.types.SyncedAnimationConfig( - self.duration, - show_playback_progress=True, - show_playback_controls=True)) - - self.d.append_javascript(self.HOVER_JS, onload="pktAnalysisOnLoad(event)") - self.d.append_javascript(js_packet_data) - - self.draw_background() - self.draw_legend(-self.size_x/2 
+ 50, -self.size_y/2 + 120) - self.draw_title('relay__1_to_1', 0, -self.size_y/2 + 50) # TODO correct title - self.draw_ttx() - - nodes = [] - self.node_ip_map = {} - - i = 0 - for node in node_list: - n = { - 'type': node['type'], - 'id': node['id'], - 'draw': self.draw_node(0, 0, node) - } - nodes.append(n) - self.node_ip_map[node['ip']] = i - i+=1 - - i = 0 - for node in nodes: - x, y = self.calculate_node_position(self.R, len(nodes), i) - node['pos'] = [x, y] - self.animate_node_to(node['draw'], x, y, 1) - i+=1 - - for node in nodes: - self.attach(node['draw']) - - self.play(packets, nodes) - - def play(self, packets, nodes): - pkts = [] - - i = 0 - for pp in packets: - pkts.append(self.send_pkt(nodes[self.node_ip_map[pp['src']]], nodes[self.node_ip_map[pp['dst']]], pp, self.batch_time, self.batch_interval, i)) - self.batch_time += self.batch_interval - i+=1 - - for pkt in pkts: - self.attach(pkt) - - def export(self, path): - self.d.save_svg(path) - - def attach(self, items): - for k in items: - self.d.append(k) - - def draw_background(self): - bg = draw.Rectangle(-self.size_x/2, -self.size_y/2, self.size_x, self.size_y, fill='#eee') - self.d.append(bg) - - def draw_ttx(self): - ttx_title = draw.Text('Console', 14, self.size_x/2-605, -self.size_y/2+60, fontWeight='bold', center=True, fill='#666', font_family='Helvetica', text_anchor='left') - ttx = draw.Text([], 12, self.size_x/2-600, -self.size_y/2+90, center=True, fill='#666', font_family='Helvetica', id='ttx', text_anchor='left') - box = draw.Rectangle(self.size_x/2-610, -self.size_y/2+76, 580, self.size_y-180, fill='#ddd', rx='4', ry='4') - - ox = self.size_x/2 - 515 - oy = -self.size_y/2+66 - up = draw.Lines(ox, oy, 10 + ox, oy - 15, 20 + ox, oy, fill='#bbb', close='true', id='up_ttx', visibility='hidden') - - ox = self.size_x/2 - 540 - oy = -self.size_y/2+52 - down = draw.Lines(ox, oy, 10 + ox, oy + 15, 20 + ox, oy, fill='#bbb', close='true', id='down_ttx', visibility='hidden') - self.attach([box, 
ttx_title, ttx, up, down]) - return [box, ttx_title, up, down, ttx] - - def draw_legend(self, x, y): - res = [] - legend_title = draw.Text('Legend', 16, x, y, font_weight='bold', center=True, fill='#666', font_family='Helvetica') - res.append(legend_title) - i = 1 - for k in self.pkt_color_map: - circle = draw.Circle(x-12, y+i*40, self.pkt_size, fill=self.pkt_color_map[k], stroke=None, stroke_width=0) - label = draw.Text(k, 12, x+8, y+i*40+4, fill='#666', font_family='Helvetica') - res.append(circle) - res.append(label) - i+=1 - - i+=0.5 - circle = draw.Circle(x-12, y+i*40, self.pkt_size, fill='#0000', stroke='#d2a', stroke_width=2) - label = draw.Text('IPv6', 12, x+8, y+i*40+4, fill='#666', font_family='Helvetica') - res.append(circle) - res.append(label) - i+=1 - circle = draw.Circle(x-12, y+i*40, self.pkt_size, fill='#0000', stroke='#84a59d', stroke_width=2) - label = draw.Text('IPv4', 12, x+8, y+i*40+4, fill='#666', font_family='Helvetica') - res.append(circle) - res.append(label) - self.attach(res) - return res - - def draw_title(self, c, x, y): - title = draw.Text(c, 24, x-2*self.offset_x, y, font_weight='bold', center=True, fill='#666', font_family='Helvetica') - self.d.append(title) - return title - - def draw_node(self, x, y, node): - res = [] - box = draw.Rectangle(x-self.node_size/2-self.offset_x, y-self.node_size/2, self.node_size, self.node_size, fill=self.node_color_map[node['type']], rx='4', ry='4') - box_name = draw.Text(node['type'], 16, x-self.offset_x, y+10, center=True, fill='#fff', font_family='Helvetica') - res.append(box) - res.append(box_name) - if self.show_node_labels: - label = draw.Text(node['id'], 14, x-self.offset_x, y+40, center=True, fill='#666', font_family='Helvetica') - res.append(label) - ipls = '' - if node['id'] != node['ip']: - ipls = node['ip'] - ip_label = draw.Text(ipls, 14, x-self.offset_x, y+60, center=True, fill='#666', font_family='Helvetica') - res.append(ip_label) - self.animate_node_to(res, x, y, 0) - 
self.animate_node_to(res, x, y, 0.5) - return res - - def draw_pkt(self, x, y, pkt, t, id): - res = [] - stroke = None - if pkt['ipv6']: - stroke = '#d2a' - else: - stroke = '#84a59d' - circle = draw.Circle(x-self.offset_x, y, 0, fill=self.pkt_color_map[pkt['type']], stroke=stroke, stroke_width=2, class_='pkt', data_label=pkt['type'], data_id=id) - res.append(circle) - self.animate_pkt_to(res, x, y, 0, t) - return res - - def animate_node_to(self, node, x, y, t): - node[0].add_key_frame(t, x=x-self.node_size/2-self.offset_x, y=y-self.node_size/2) - node[1].add_key_frame(t, x=x-self.offset_x, y=y+10) - i = 2 - if self.show_node_labels: - node[i].add_key_frame(t, x=x-self.offset_x, y=y+40) - node[i+1].add_key_frame(t, x=x-self.offset_x, y=y+60) - i+=2 - - def animate_pkt_to(self, pkt, x, y, r, t): - pkt[0].add_key_frame(t, cx=x-self.offset_x, cy=y, r=r) - - def show_pkt(self, pkt, t): - pkt[0].add_key_frame(t+self.batch_interval/10, r=self.pkt_size) - - def hide_pkt(self, pkt, t): - pkt[0].add_key_frame(t, r=0) - - def send_pkt(self, n_from, n_to, pkt, t, i, pkt_id): - p_from = n_from['pos'] - p_to = n_to['pos'] - p = self.draw_pkt(p_from[0], p_from[1], pkt, t, pkt_id) - self.show_pkt(p, t) - self.animate_pkt_to(p, p_to[0], p_to[1], self.pkt_size, t+i) - self.hide_pkt(p, t+i) - return p - - def calculate_node_position(self, r, node_cnt, i): - x = r*math.cos(i*2*math.pi/node_cnt)-self.offset_x - y = r*math.sin(i*2*math.pi/node_cnt) - return (x, y) - -def run_viz(args): - t_stdout = sys.stdout - class PseudoNonTTY(object): - def __init__(self, underlying): - self.__underlying = underlying - def __getattr__(self, name): - return getattr(self.__underlying, name) - def isatty(self): - return False - - sys.stdout = PseudoNonTTY(sys.stdout) # disable color output on packet data for JS - - viz = NetsimViz() - viz.run(args) - viz.export(args['output']) - - sys.stdout = t_stdout - -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser() - 
parser.add_argument("path", help = "Input file path") - parser.add_argument("--topo", help = "Topo config file path") - parser.add_argument("--keylog", help = "NSS Keylog file path to decrypt TLS traffic") - parser.add_argument("--output", help = "Output file path") + parser.add_argument("path", help="Input file path") + parser.add_argument("--topo", help="Topo config file path") + parser.add_argument("--keylog", help="NSS Keylog file path to decrypt TLS traffic") + parser.add_argument("--output", help="Output file path") args = parser.parse_args() - run_viz(args) \ No newline at end of file + run_viz(args) diff --git a/netsim/reports_csv.py b/netsim/reports_csv.py index b1aa71e..c53c008 100644 --- a/netsim/reports_csv.py +++ b/netsim/reports_csv.py @@ -1,200 +1,86 @@ import argparse -import json import os -import time +from parsing.reports import * -def res_to_prom(res, commit): - for k, v in res.items(): - for c, t in v.items(): - labels = 'name="%s",case="%s"' % (k, c) - if commit: - labels += ',commit="%s"' % commit - print('throughput{%s} %f' % (labels, t['throughput'])) - print('reported_throughput{%s} %f' % (labels, t['reported_throughput'])) -case_order = ['1_to_1', '1_to_3', '1_to_5', '1_to_10', '2_to_2', '2_to_4', '2_to_6', '2_to_10'] +def collect_files(integration_flag): + files = [] + res = {} + for root, _, fs in os.walk("report"): + for f in fs: + if integration_flag == f.startswith("integration_") and not f.startswith( + "intg_" + ): + ff = os.path.join(root, f) + files.append(ff) + k = ff.split("__") + name = k[0][len("report/") :] + res[name] = {} + return files, res -def case_sort(x): - if x[0] in case_order: - return case_order.index(x[0]) - else: - case_order.append(x[0]) - return len(case_order) - 1 -def res_to_table(res): - print('| test | case | throughput_gbps | throughput_transfer') - print('| ---- | ---- | ---------- | ---------- |') - for k, v in res.items(): - vl = [(g,h) for g,h in v.items()] - vl = sorted(vl, key=case_sort) - for c, t 
in vl: - print('| %s | %s | %.2f | %.2f' % (k, c, t['throughput'], t['reported_throughput'])) +def update_integration_results(json_data, res, name, case): + for itg in json_data: + for ik, iv in itg.items(): + if ik == "node": + continue + vv = 1 if iv == "true" else 0 + res_key = f"{name}_{ik}" + if res_key not in res: + res[res_key] = {} + res[res_key][case] = vv + return res + -def res_to_metro(res, commit, integration): - r = { - "metrics": [] - } - now = int( time.time() ) - keys = [] - prefix = 'iroh' - if integration: - prefix = 'integration' - for k, v in res.items(): - if k.startswith(prefix): - keys.append(k) - - # print(json.dumps(res, indent=4)) +def update_performance_results(json_data, res, name, case, prom_flag): + throughput = json_data["sum"]["mbits"] / (1000 if not prom_flag else 1) + reported_throughput = json_data["sum"]["reported_mbits"] / ( + 1000 if not prom_flag else 1 + ) + reported_time = json_data["avg"]["reported_time"] + elapsed = json_data["avg"]["elapsed"] - for k in keys: - v = res[k] - suffix_p = k.split('_') - suffix = '_'.join(suffix_p[1:]) - if integration: - suffix = '_'.join(suffix_p[2:]) - if suffix != '': - suffix = '.' 
+ suffix - nm = "throughput_gbps" - if integration: - nm = '_'.join(suffix_p[1:]) - bkt = "netsim" - if integration: - bkt = "integration" - for c, t in v.items(): - if integration: - m = { - "commitish": commit[0:7], - "bucket": bkt, - "name": nm, - "tag": '%s%s' % (c, suffix), - "value": t, - "timestamp": now - } - r["metrics"].append(m) - else: - m = { - "commitish": commit[0:7], - "bucket": bkt, - "name": nm, - "tag": '%s%s' % (c, suffix), - "value": t['throughput'], - "timestamp": now - } - r["metrics"].append(m) - n = { - "commitish": commit[0:7], - "bucket": bkt, - "name": 'reported_throughput_gbps', - "tag": '%s%s' % (c, suffix), - "value": t['reported_throughput'], - "timestamp": now - } - r["metrics"].append(n) - - if suffix == '': - # report time - n = { - "commitish": commit[0:7], - "bucket": bkt, - "name": 'time', - "tag": '%s%s%s' % (c, suffix, '.total'), - "value": t['elapsed'], - "timestamp": now - } - r["metrics"].append(n) - n = { - "commitish": commit[0:7], - "bucket": bkt, - "name": 'time', - "tag": '%s%s%s' % (c, suffix, '.transfer'), - "value": t['reported_time'], - "timestamp": now - } - r["metrics"].append(n) - n = { - "commitish": commit[0:7], - "bucket": bkt, - "name": 'time', - "tag": '%s%s%s' % (c, suffix, '.setup'), - "value": t['elapsed'] - t['reported_time'], - "timestamp": now - } - r["metrics"].append(n) - print(json.dumps(r, indent=4, sort_keys=True)) + res[name][case] = { + "throughput": round(throughput, 2), + "reported_throughput": round(reported_throughput, 2), + "reported_time": round(reported_time, 2), + "elapsed": round(elapsed, 2), + } + return res -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument("--commit", help = "commit hash") - parser.add_argument("--prom", help = "generate output for prometheus", action='store_true') - parser.add_argument("--table", help = "generate output for github comments", action='store_true') - parser.add_argument("--metro", help = 
"generate output for perf.iroh.computer", action='store_true') - parser.add_argument("--integration", help = "generate output for integration files", action='store_true') + parser.add_argument("--commit", help="commit hash") + parser.add_argument( + "--prom", help="generate output for prometheus", action="store_true" + ) + parser.add_argument( + "--table", help="generate output for github comments", action="store_true" + ) + parser.add_argument( + "--metro", help="generate output for perf.iroh.computer", action="store_true" + ) + parser.add_argument( + "--integration", + help="generate output for integration files", + action="store_true", + ) args = parser.parse_args() - files = [] - for root, dirs, fs in os.walk('report'): - for f in fs: - if args.integration != f.startswith('integration_'): - continue - if f.startswith('intg_'): - continue - files.append(os.path.join(root,f)) - - test_map = {} - case_map = {} - - # format: testname__case__node - for f in files: - k = f.split('__') - name = k[0][len('report/'):] - test_map[name] = '' - case = k[1] - - res = {} - - for k, v in test_map.items(): - res[k] = {} - for c in case_map: - res[k][c] = -1.0 + files, res = collect_files(args.integration) for f in files: - k = f.split('__') - name = k[0][len('report/'):] + k = f.split("__") + name = k[0][len("report/") :] case = k[1] - # print("parsing", f) - json_f = open(f, 'r') + json_f = open(f, "r") json_d = json.load(json_f) if args.integration: - for itg in json_d: - for ik, iv in itg.items(): - if ik == 'node': - continue - vv = iv == 'true' - if vv: - vv = 1 - else: - vv = 0 - if not name + "_" + str(ik) in res: - res[name + "_" + str(ik)] = {} - res[name + "_" + str(ik)][case] = vv + res = update_integration_results(json_d, res, name, case) else: - throughput = json_d['sum']['mbits'] - reported_throughput = json_d['sum']['reported_mbits'] - reported_time = json_d['avg']['reported_time'] - elapsed = json_d['avg']['elapsed'] - if not args.prom: - throughput /= 1000 - 
reported_throughput /= 1000 - throughput = float("{:.2f}".format(throughput)) - reported_throughput = float("{:.2f}".format(reported_throughput)) - reported_time = float("{:.2f}".format(reported_time)) - elapsed = float("{:.2f}".format(elapsed)) - res[name][case] = {} - res[name][case]['throughput'] = throughput - res[name][case]['reported_throughput'] = reported_throughput - res[name][case]['reported_time'] = reported_time - res[name][case]['elapsed'] = elapsed + res = update_performance_results(json_d, res, name, case, args.prom) if args.prom: res_to_prom(res, args.commit) elif args.table: @@ -202,4 +88,4 @@ def res_to_metro(res, commit, integration): elif args.metro: res_to_metro(res, args.commit, args.integration) else: - print(json.dumps(res, indent=4, sort_keys=True)) \ No newline at end of file + print(json.dumps(res, indent=4, sort_keys=True)) diff --git a/netsim/scripts/project_deps.sh b/netsim/scripts/project_deps.sh new file mode 100755 index 0000000..994e865 --- /dev/null +++ b/netsim/scripts/project_deps.sh @@ -0,0 +1,25 @@ +mkdir -p logs +mkdir -p report +mkdir -p data +mkdir -p keys +mkdir -p bins +mkdir -p viz +cd data +rm -f 1G.bin +rm -f 100M.bin +for i in {1..10}; do + cat ../../fixtures/10MiB.car >> 100M.bin +done +for i in {1..10}; do + cat 100M.bin >> 1G.bin +done +cp ../../fixtures/key.pem ../bins/key.pem +cp ../../fixtures/cert.pem ../bins/cert.pem +cp ../../fixtures/relay.config.toml relay.config.toml +cp ../../fixtures/direct_relay.cfg direct_relay.cfg +cp ../../fixtures/relay.direct.config.toml relay.direct.config.toml +cp ../../fixtures/1MB.bin 1MB.bin +cp ../../fixtures/hello.bin hello.bin +cp ../../fixtures/generate_files.sh generate_files.sh +cp ../../fixtures/bulk_files_test_setup.sh bulk_files_test_setup.sh +./bulk_files_test_setup.sh diff --git a/netsim/scripts/python_deps.sh b/netsim/scripts/python_deps.sh new file mode 100755 index 0000000..f0f674f --- /dev/null +++ b/netsim/scripts/python_deps.sh @@ -0,0 +1 @@ +pip3 install -r 
scripts/requirements.txt \ No newline at end of file diff --git a/netsim/scripts/requirements.txt b/netsim/scripts/requirements.txt new file mode 100644 index 0000000..fc4e222 --- /dev/null +++ b/netsim/scripts/requirements.txt @@ -0,0 +1,4 @@ +pyshark +drawsvg +dpkt +humanfriendly \ No newline at end of file diff --git a/netsim/scripts/ubuntu_deps.sh b/netsim/scripts/ubuntu_deps.sh new file mode 100755 index 0000000..db7e994 --- /dev/null +++ b/netsim/scripts/ubuntu_deps.sh @@ -0,0 +1,2 @@ +sudo apt install mininet openvswitch-testcontroller iperf tshark smcroute +sudo systemctl enable smcroute.service \ No newline at end of file diff --git a/netsim/setup.sh b/netsim/setup.sh index 1e8677a..5c1f9da 100755 --- a/netsim/setup.sh +++ b/netsim/setup.sh @@ -1,34 +1,3 @@ -sudo apt install mininet -sudo apt install openvswitch-testcontroller -sudo apt install iperf -# sudo apt install wireshark -sudo apt install tshark -sudo pip3 install pyshark -sudo pip3 install drawsvg -sudo pip3 install dpkt -sudo pip3 install humanfriendly -mkdir -p logs -mkdir -p report -mkdir -p data -mkdir -p keys -mkdir -p bins -mkdir -p viz -cd data -rm -f 1G.bin -rm -f 100M.bin -for i in {1..10}; do - cat ../../fixtures/10MiB.car >> 100M.bin -done -for i in {1..10}; do - cat 100M.bin >> 1G.bin -done -cp ../../fixtures/key.pem ../bins/key.pem -cp ../../fixtures/cert.pem ../bins/cert.pem -cp ../../fixtures/relay.config.toml relay.config.toml -cp ../../fixtures/direct_relay.cfg direct_relay.cfg -cp ../../fixtures/relay.direct.config.toml relay.direct.config.toml -cp ../../fixtures/1MB.bin 1MB.bin -cp ../../fixtures/hello.bin hello.bin -cp ../../fixtures/generate_files.sh generate_files.sh -cp ../../fixtures/bulk_files_test_setup.sh bulk_files_test_setup.sh -./bulk_files_test_setup.sh \ No newline at end of file +./scripts/ubuntu_deps.sh +./scripts/python_deps.sh +./scripts/project_deps.sh \ No newline at end of file diff --git a/netsim/sims/integration/iroh.json 
b/netsim/sims/integration/iroh.json index 59836a1..87559ef 100644 --- a/netsim/sims/integration/iroh.json +++ b/netsim/sims/integration/iroh.json @@ -7,7 +7,7 @@ "visualize": true, "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh start --add data/1MB.bin", "type": "public", @@ -18,13 +18,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 1, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1mb", @@ -37,7 +37,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh start --add data/1MB.bin", "type": "public", @@ -48,13 +48,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 3, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1mb" @@ -66,7 +66,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 2, "cmd": "./bins/iroh start --add data/1MB.bin", "type": "public", @@ -77,13 +77,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 2, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1mb" @@ -95,7 +95,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 2, "cmd": "./bins/iroh start --add data/1MB.bin", "type": "public", @@ -106,13 +106,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 4, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": 
"params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1mb" diff --git a/netsim/sims/integration/relay.json b/netsim/sims/integration/relay.json index c862a92..e2f9c0c 100644 --- a/netsim/sims/integration/relay.json +++ b/netsim/sims/integration/relay.json @@ -7,7 +7,7 @@ "visualize": true, "nodes": [ { - "name": "1_relay", + "name": "1_r", "count": 1, "cmd": "./bins/iroh-relay --dev --config-path ./relay.cfg", "type": "public", @@ -17,7 +17,7 @@ } }, { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh --config data/relay.config.toml start --add data/1MB.bin", "type": "public", @@ -28,13 +28,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 1, "cmd": "time ./bins/iroh --config data/relay.config.toml blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1mb", @@ -48,7 +48,7 @@ "visualize": true, "nodes": [ { - "name": "1_relay", + "name": "1_r", "count": 1, "cmd": "./bins/iroh-relay --dev --config-path ./relay.cfg", "type": "public", @@ -58,7 +58,7 @@ } }, { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh --config data/relay.config.toml start --add data/1MB.bin", "type": "nat", @@ -69,13 +69,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 1, "cmd": "time ./bins/iroh --config data/relay.config.toml blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1mb", @@ -89,7 +89,7 @@ "visualize": true, "nodes": [ { - "name": "1_relay", + "name": "1_r", "count": 1, "cmd": "./bins/iroh-relay --dev --config-path ./relay.cfg", "type": "public", @@ -99,7 +99,7 @@ } }, { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh --config data/relay.config.toml 
start --add data/1MB.bin", "type": "public", @@ -110,13 +110,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 1, "cmd": "time ./bins/iroh --config data/relay.config.toml blobs get --start %s --out STDOUT > /dev/null", "type": "nat", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1mb", @@ -130,7 +130,7 @@ "visualize": true, "nodes": [ { - "name": "1_relay", + "name": "1_r", "count": 1, "cmd": "./bins/iroh-relay --dev --config-path ./relay.cfg", "type": "public", @@ -140,7 +140,7 @@ } }, { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh --config data/relay.config.toml start --add data/1MB.bin", "type": "nat", @@ -151,13 +151,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 1, "cmd": "time ./bins/iroh --config data/relay.config.toml blobs get --start %s --out STDOUT > /dev/null", "type": "nat", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1mb", diff --git a/netsim/sims/iroh/iroh.json b/netsim/sims/iroh/iroh.json index 7ffb0c7..298cfb2 100644 --- a/netsim/sims/iroh/iroh.json +++ b/netsim/sims/iroh/iroh.json @@ -6,7 +6,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -17,13 +17,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 1, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb" @@ -35,7 +35,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -46,13 +46,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": 
"i_get", "count": 3, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb" @@ -64,7 +64,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -75,13 +75,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 5, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb" @@ -93,7 +93,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -104,13 +104,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 10, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb" @@ -122,7 +122,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 2, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -133,13 +133,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 2, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb" @@ -151,7 +151,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 2, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -162,13 +162,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 4, "cmd": "time ./bins/iroh blobs 
get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb" @@ -180,7 +180,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 2, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -191,13 +191,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 6, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb" @@ -209,7 +209,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 2, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -220,13 +220,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 10, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb" diff --git a/netsim/sims/iroh/iroh_200ms.json b/netsim/sims/iroh/iroh_200ms.json index f413174..0250bb4 100644 --- a/netsim/sims/iroh/iroh_200ms.json +++ b/netsim/sims/iroh/iroh_200ms.json @@ -6,7 +6,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -17,13 +17,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 1, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb", @@ -40,7 +40,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ 
-51,13 +51,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 3, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb", @@ -74,7 +74,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -85,13 +85,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 5, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb", @@ -108,7 +108,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -119,13 +119,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 10, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb", @@ -142,7 +142,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 2, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -153,13 +153,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 2, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb", @@ -176,7 +176,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 2, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -187,13 +187,13 @@ "param_parser": "iroh_ticket" 
}, { - "name": "iroh_get", + "name": "i_get", "count": 4, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb", @@ -210,7 +210,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 2, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -221,13 +221,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 6, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb", @@ -244,7 +244,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 2, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -255,13 +255,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 10, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb", diff --git a/netsim/sims/iroh/iroh_20ms.json b/netsim/sims/iroh/iroh_20ms.json index e8613fc..7617d0b 100644 --- a/netsim/sims/iroh/iroh_20ms.json +++ b/netsim/sims/iroh/iroh_20ms.json @@ -6,7 +6,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -17,13 +17,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 1, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb", @@ -40,7 +40,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": 
"i_srv", "count": 1, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -51,13 +51,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 3, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb", @@ -74,7 +74,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -85,13 +85,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 5, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb", @@ -108,7 +108,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -119,13 +119,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 10, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb", @@ -142,7 +142,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 2, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -153,13 +153,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 2, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb", @@ -176,7 +176,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 2, "cmd": "./bins/iroh start 
--add data/1G.bin", "type": "public", @@ -187,13 +187,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 4, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb", @@ -210,7 +210,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 2, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -221,13 +221,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 6, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb", @@ -244,7 +244,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 2, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -255,13 +255,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 10, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb", diff --git a/netsim/sims/iroh/iroh_many_files.json b/netsim/sims/iroh/iroh_many_files.json index 2c82760..0044a9a 100644 --- a/netsim/sims/iroh/iroh_many_files.json +++ b/netsim/sims/iroh/iroh_many_files.json @@ -6,7 +6,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh start --add data/bulk_1k_x_10k", "type": "public", @@ -17,13 +17,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 1, "cmd": "time ./bins/iroh blobs get --start %s", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": 
"iroh_cust_10mb" @@ -35,7 +35,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh start --add data/bulk_1k_x_10k", "type": "public", @@ -46,13 +46,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 3, "cmd": "time ./bins/iroh blobs get --start %s", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_cust_10mb" @@ -64,7 +64,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh start --add data/bulk_5k_x_10k", "type": "public", @@ -75,13 +75,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 1, "cmd": "time ./bins/iroh blobs get --start %s", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_cust_50mb" @@ -93,7 +93,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh start --add data/bulk_5k_x_10k", "type": "public", @@ -104,13 +104,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 3, "cmd": "time ./bins/iroh blobs get --start %s", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_cust_50mb" @@ -122,7 +122,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh start --add data/bulk_1k_x_1m", "type": "public", @@ -133,13 +133,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 1, "cmd": "time ./bins/iroh blobs get --start %s", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb" @@ -151,7 +151,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", 
"count": 1, "cmd": "./bins/iroh start --add data/bulk_1k_x_1m", "type": "public", @@ -162,13 +162,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 3, "cmd": "time ./bins/iroh blobs get --start %s", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb" @@ -180,7 +180,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh start --add data/bulk_mix", "type": "public", @@ -191,13 +191,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 1, "cmd": "time ./bins/iroh blobs get --start %s", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_cust_4.01gb" @@ -209,7 +209,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh start --add data/bulk_mix", "type": "public", @@ -220,13 +220,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 3, "cmd": "time ./bins/iroh blobs get --start %s", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_cust_4.01gb" diff --git a/netsim/sims/lossy/iroh.json b/netsim/sims/lossy/iroh.json index 8c5945f..64ebbee 100644 --- a/netsim/sims/lossy/iroh.json +++ b/netsim/sims/lossy/iroh.json @@ -6,7 +6,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -17,13 +17,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 1, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb", @@ -40,7 +40,7 @@ 
"description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -51,13 +51,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 3, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb", @@ -74,7 +74,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -85,13 +85,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 5, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb", @@ -108,7 +108,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -119,13 +119,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 10, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb", @@ -142,7 +142,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 2, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -153,13 +153,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 2, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb", @@ -176,7 +176,7 @@ "description": "", "nodes": [ { - "name": 
"iroh_srv", + "name": "i_srv", "count": 2, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -187,13 +187,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 4, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb", @@ -210,7 +210,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 2, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -221,13 +221,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 6, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb", @@ -244,7 +244,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 2, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -255,13 +255,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 10, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb", diff --git a/netsim/sims/standard/iroh.json b/netsim/sims/standard/iroh.json index 7ffb0c7..298cfb2 100644 --- a/netsim/sims/standard/iroh.json +++ b/netsim/sims/standard/iroh.json @@ -6,7 +6,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -17,13 +17,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 1, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": 
"iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb" @@ -35,7 +35,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -46,13 +46,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 3, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb" @@ -64,7 +64,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -75,13 +75,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 5, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb" @@ -93,7 +93,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 1, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -104,13 +104,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 10, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb" @@ -122,7 +122,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 2, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -133,13 +133,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 2, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", 
"parser": "iroh_1gb" @@ -151,7 +151,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 2, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -162,13 +162,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 4, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb" @@ -180,7 +180,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 2, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -191,13 +191,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 6, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb" @@ -209,7 +209,7 @@ "description": "", "nodes": [ { - "name": "iroh_srv", + "name": "i_srv", "count": 2, "cmd": "./bins/iroh start --add data/1G.bin", "type": "public", @@ -220,13 +220,13 @@ "param_parser": "iroh_ticket" }, { - "name": "iroh_get", + "name": "i_get", "count": 10, "cmd": "time ./bins/iroh blobs get --start %s --out STDOUT > /dev/null", "type": "public", "connect": { "strategy": "params", - "node": "iroh_srv" + "node": "i_srv" }, "process": "short", "parser": "iroh_1gb" diff --git a/netsim/sniffer/__init__.py b/netsim/sniffer/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/netsim/sniffer/process.py b/netsim/sniffer/process.py new file mode 100644 index 0000000..3ce0958 --- /dev/null +++ b/netsim/sniffer/process.py @@ -0,0 +1,561 @@ +import pyshark +import sys +import drawsvg as draw +import math +import json + + +def read_topo(path): + if path: + f = open(path) + d = json.load(f) + f.close() + return d + return None + + +def load_pcap(path, keylog=None, 
topo_path=None): + parameters_dict = {} + if keylog: + parameters_dict = {"-o": "ssl.keylog_file:" + keylog} + cap_json = pyshark.FileCapture(path, custom_parameters=parameters_dict) + packets = [] + str_packet_data = [] + + topo = read_topo(topo_path) + topo_nodes = [] + topo_node_ip_map = {} + if topo: + i = 0 + for node in topo["nodes"]: + if node["type"] == "Host" or node["type"] == "NAT": + topo_nodes.append(node) + topo_node_ip_map[node["ip"]] = i + i += 1 + + node_list = [] + ip_set = set() + + for packet in cap_json: + if len(packet.layers) == 2: + if "arp" in packet: # ignore ARP + continue + if "ip" in packet: + ip_set.add(packet.ip.src) + ip_set.add(packet.ip.dst) + p = {"src": packet.ip.src, "dst": packet.ip.dst, "type": "", "ipv6": False} + + if "tcp" in packet: + p["type"] = "TCP" + elif "udp" in packet: + p["type"] = "UDP" + + if "quic" in packet: + p["type"] = "QUIC" + elif "http" in packet: + p["type"] = "HTTP" + elif "stun" in packet: + p["type"] = "STUN" + elif "icmp" in packet: + p["type"] = "ICMP" + + packets.append(p) + zk = packet.__str__().replace("\n", "<\\n>") + str_packet_data.append(zk) + + if "ipv6" in packet: + p = { + "src": packet.ipv6.src, + "dst": packet.ipv6.dst, + "type": "ICMPv6", + "ipv6": True, + } + + js_packet_data = "const pkt_data = [" + for i in range(len(str_packet_data)): + js_packet_data += "`{}`,".format(str_packet_data[i]) + js_packet_data += "];" + + for ip in ip_set: + n = {"type": "node", "id": ip, "ip": ip} + if ip in topo_node_ip_map: + tn = topo_nodes[topo_node_ip_map[ip]] + if tn["name"].startswith("1_relay"): + n["type"] = "relay" + n["id"] = tn["name"] + if tn["name"].startswith("iroh"): + n["type"] = "iroh" + n["id"] = tn["name"].replace("iroh_", "") + if tn["name"].startswith("i_"): + n["type"] = "iroh" + n["id"] = tn["name"].replace("i_", "") + elif tn["name"].startswith("n_"): + n["type"] = "nat" + n["id"] = tn["name"] + + node_list.append(n) + return packets, js_packet_data, node_list + + +class 
NetsimViz: + + HOVER_JS = """ + function pktAnalysisOnLoad(event) { + console.log('pktAnalysisOnLoad'); + var pkts = document.getElementsByClassName('pkt'); + for (var i = 0; i < pkts.length; i++) { + pkts[i].addEventListener('mouseover', pktMouseOverEvt); + } + var up = document.getElementById('up_ttx'); + var down = document.getElementById('down_ttx'); + up.addEventListener('click', upTTX); + down.addEventListener('click', downTTX); + } + function pktMouseOverEvt(event) { + var target = event.target; + var label = target.getAttribute('data-label'); + var id = target.getAttribute('data-id'); + console.log(label); + console.log(id); + renderTTX(id, 0); + } + function upTTX(event) { + var ttx = document.getElementById('ttx'); + var id = parseInt(ttx.getAttribute('data-pid'), 10); + var offset = parseInt(ttx.getAttribute('data-offset'), 10); + var len = parseInt(ttx.getAttribute('data-len'), 10); + if (offset > 0) { + offset -= 5; + } + if (offset < 0) { + offset = 0; + } + renderTTX(id, offset); + } + function downTTX(event) { + var ttx = document.getElementById('ttx'); + var id = parseInt(ttx.getAttribute('data-pid'), 10); + var offset = parseInt(ttx.getAttribute('data-offset'), 10); + var len = parseInt(ttx.getAttribute('data-len'), 10); + if (offset < len - 48) { + offset += 5; + } + renderTTX(id, offset); + } + function renderTTX(id, offset) { + var ttx = document.getElementById('ttx'); + ttx.setAttribute('data-pid', id); + ttx.setAttribute('data-offset', offset); + var x_pos = ttx.getAttribute('x'); + var lines = pkt_data[id].split("<\\n>"); + console.log('LL', lines.length); + ttx.setAttribute('data-len', lines.length); + var up = document.getElementById('up_ttx'); + var down = document.getElementById('down_ttx'); + if (offset > 0) { + console.log('show up'); + up.setAttribute('visibility', 'visible'); + } else { + up.setAttribute('visibility', 'hidden'); + } + if (lines.length - offset > 48) { + console.log('show down'); + down.setAttribute('visibility', 
'visible'); + } else { + down.setAttribute('visibility', 'hidden'); + } + var ih = ''; + for (var i = offset; i < lines.length && i < 48 + offset; i++) { + ih += '' + lines[i].replace('\\t', ' ') + ''; + } + ttx.innerHTML = ih; + } + """ + + def __init__(self): + # TODO make configurable + self.size_x = 1600 + self.size_y = 800 + self.offset_x = 160 + self.batch_time = 1.25 + self.batch_interval = 1 + self.duration = 0 + self.node_size = 60 + self.R = 300 + self.show_node_labels = True + self.pkt_size = 12 + + self.node_color_map = { + "iroh": "#7c7cff", + "relay": "#ff7c7c", + "node": "#7cff7c", + "nat": "#ff7cff", + } + + self.pkt_color_map = { + "ICMP": "#8ecae6", + "ICMPv6": "#219EBC", + "TCP": "#2a9d8f", + "UDP": "#ffd60a", + "STUN": "#035781", + "HTTP": "#FB8500", + "QUIC": "#e63946", + } + + def run(self, args): + packets, js_packet_data, node_list = load_pcap( + args["path"], args["keylog"], args["topo"] + ) + + self.duration = self.batch_time + self.batch_interval * len(packets) + 1 + + self.d = draw.Drawing( + self.size_x, + self.size_y, + origin="center", + animation_config=draw.types.SyncedAnimationConfig( + self.duration, show_playback_progress=True, show_playback_controls=True + ), + ) + + self.d.append_javascript(self.HOVER_JS, onload="pktAnalysisOnLoad(event)") + self.d.append_javascript(js_packet_data) + + self.draw_background() + self.draw_legend(-self.size_x / 2 + 50, -self.size_y / 2 + 120) + self.draw_title("relay__1_to_1", 0, -self.size_y / 2 + 50) # TODO correct title + self.draw_ttx() + + nodes = [] + self.node_ip_map = {} + + i = 0 + for node in node_list: + n = { + "type": node["type"], + "id": node["id"], + "draw": self.draw_node(0, 0, node), + } + nodes.append(n) + self.node_ip_map[node["ip"]] = i + i += 1 + + i = 0 + for node in nodes: + x, y = self.calculate_node_position(self.R, len(nodes), i) + node["pos"] = [x, y] + self.animate_node_to(node["draw"], x, y, 1) + i += 1 + + for node in nodes: + self.attach(node["draw"]) + + 
self.play(packets, nodes) + + def play(self, packets, nodes): + pkts = [] + + i = 0 + for pp in packets: + pkts.append( + self.send_pkt( + nodes[self.node_ip_map[pp["src"]]], + nodes[self.node_ip_map[pp["dst"]]], + pp, + self.batch_time, + self.batch_interval, + i, + ) + ) + self.batch_time += self.batch_interval + i += 1 + + for pkt in pkts: + self.attach(pkt) + + def export(self, path): + self.d.save_svg(path) + + def attach(self, items): + for k in items: + self.d.append(k) + + def draw_background(self): + bg = draw.Rectangle( + -self.size_x / 2, -self.size_y / 2, self.size_x, self.size_y, fill="#eee" + ) + self.d.append(bg) + + def draw_ttx(self): + ttx_title = draw.Text( + "Console", + 14, + self.size_x / 2 - 605, + -self.size_y / 2 + 60, + fontWeight="bold", + center=True, + fill="#666", + font_family="Helvetica", + text_anchor="left", + ) + ttx = draw.Text( + [], + 12, + self.size_x / 2 - 600, + -self.size_y / 2 + 90, + center=True, + fill="#666", + font_family="Helvetica", + id="ttx", + text_anchor="left", + ) + box = draw.Rectangle( + self.size_x / 2 - 610, + -self.size_y / 2 + 76, + 580, + self.size_y - 180, + fill="#ddd", + rx="4", + ry="4", + ) + + ox = self.size_x / 2 - 515 + oy = -self.size_y / 2 + 66 + up = draw.Lines( + ox, + oy, + 10 + ox, + oy - 15, + 20 + ox, + oy, + fill="#bbb", + close="true", + id="up_ttx", + visibility="hidden", + ) + + ox = self.size_x / 2 - 540 + oy = -self.size_y / 2 + 52 + down = draw.Lines( + ox, + oy, + 10 + ox, + oy + 15, + 20 + ox, + oy, + fill="#bbb", + close="true", + id="down_ttx", + visibility="hidden", + ) + self.attach([box, ttx_title, ttx, up, down]) + return [box, ttx_title, up, down, ttx] + + def draw_legend(self, x, y): + res = [] + legend_title = draw.Text( + "Legend", + 16, + x, + y, + font_weight="bold", + center=True, + fill="#666", + font_family="Helvetica", + ) + res.append(legend_title) + i = 1 + for k in self.pkt_color_map: + circle = draw.Circle( + x - 12, + y + i * 40, + self.pkt_size, + 
fill=self.pkt_color_map[k], + stroke=None, + stroke_width=0, + ) + label = draw.Text( + k, 12, x + 8, y + i * 40 + 4, fill="#666", font_family="Helvetica" + ) + res.append(circle) + res.append(label) + i += 1 + + i += 0.5 + circle = draw.Circle( + x - 12, + y + i * 40, + self.pkt_size, + fill="#0000", + stroke="#d2a", + stroke_width=2, + ) + label = draw.Text( + "IPv6", 12, x + 8, y + i * 40 + 4, fill="#666", font_family="Helvetica" + ) + res.append(circle) + res.append(label) + i += 1 + circle = draw.Circle( + x - 12, + y + i * 40, + self.pkt_size, + fill="#0000", + stroke="#84a59d", + stroke_width=2, + ) + label = draw.Text( + "IPv4", 12, x + 8, y + i * 40 + 4, fill="#666", font_family="Helvetica" + ) + res.append(circle) + res.append(label) + self.attach(res) + return res + + def draw_title(self, c, x, y): + title = draw.Text( + c, + 24, + x - 2 * self.offset_x, + y, + font_weight="bold", + center=True, + fill="#666", + font_family="Helvetica", + ) + self.d.append(title) + return title + + def draw_node(self, x, y, node): + res = [] + box = draw.Rectangle( + x - self.node_size / 2 - self.offset_x, + y - self.node_size / 2, + self.node_size, + self.node_size, + fill=self.node_color_map[node["type"]], + rx="4", + ry="4", + ) + box_name = draw.Text( + node["type"], + 16, + x - self.offset_x, + y + 10, + center=True, + fill="#fff", + font_family="Helvetica", + ) + res.append(box) + res.append(box_name) + if self.show_node_labels: + label = draw.Text( + node["id"], + 14, + x - self.offset_x, + y + 40, + center=True, + fill="#666", + font_family="Helvetica", + ) + res.append(label) + ipls = "" + if node["id"] != node["ip"]: + ipls = node["ip"] + ip_label = draw.Text( + ipls, + 14, + x - self.offset_x, + y + 60, + center=True, + fill="#666", + font_family="Helvetica", + ) + res.append(ip_label) + self.animate_node_to(res, x, y, 0) + self.animate_node_to(res, x, y, 0.5) + return res + + def draw_pkt(self, x, y, pkt, t, id): + res = [] + stroke = None + if pkt["ipv6"]: + 
stroke = "#d2a" + else: + stroke = "#84a59d" + circle = draw.Circle( + x - self.offset_x, + y, + 0, + fill=self.pkt_color_map[pkt["type"]], + stroke=stroke, + stroke_width=2, + class_="pkt", + data_label=pkt["type"], + data_id=id, + ) + res.append(circle) + self.animate_pkt_to(res, x, y, 0, t) + return res + + def animate_node_to(self, node, x, y, t): + node[0].add_key_frame( + t, x=x - self.node_size / 2 - self.offset_x, y=y - self.node_size / 2 + ) + node[1].add_key_frame(t, x=x - self.offset_x, y=y + 10) + i = 2 + if self.show_node_labels: + node[i].add_key_frame(t, x=x - self.offset_x, y=y + 40) + node[i + 1].add_key_frame(t, x=x - self.offset_x, y=y + 60) + i += 2 + + def animate_pkt_to(self, pkt, x, y, r, t): + pkt[0].add_key_frame(t, cx=x - self.offset_x, cy=y, r=r) + + def show_pkt(self, pkt, t): + pkt[0].add_key_frame(t + self.batch_interval / 10, r=self.pkt_size) + + def hide_pkt(self, pkt, t): + pkt[0].add_key_frame(t, r=0) + + def send_pkt(self, n_from, n_to, pkt, t, i, pkt_id): + p_from = n_from["pos"] + p_to = n_to["pos"] + p = self.draw_pkt(p_from[0], p_from[1], pkt, t, pkt_id) + self.show_pkt(p, t) + self.animate_pkt_to(p, p_to[0], p_to[1], self.pkt_size, t + i) + self.hide_pkt(p, t + i) + return p + + def calculate_node_position(self, r, node_cnt, i): + x = r * math.cos(i * 2 * math.pi / node_cnt) - self.offset_x + y = r * math.sin(i * 2 * math.pi / node_cnt) + return (x, y) + + +def run_viz(args): + t_stdout = sys.stdout + + class PseudoNonTTY(object): + def __init__(self, underlying): + self.__underlying = underlying + + def __getattr__(self, name): + return getattr(self.__underlying, name) + + def isatty(self): + return False + + try: + sys.stdout = PseudoNonTTY( + sys.stdout + ) # disable color output on packet data for JS + + viz = NetsimViz() + viz.run(args) + viz.export(args["output"]) + + sys.stdout = t_stdout + except Exception as e: + sys.stdout = t_stdout + raise e diff --git a/netsim/sniff.py b/netsim/sniffer/sniff.py similarity index 
51% rename from netsim/sniff.py rename to netsim/sniffer/sniff.py index 232c2f0..a8e1499 100644 --- a/netsim/sniff.py +++ b/netsim/sniffer/sniff.py @@ -5,27 +5,45 @@ from struct import unpack from ipaddress import ip_address -HOST_TYPES = ['Host', 'CPULimitedHost', 'NAT'] -SWITCH_TYPES = ['UserSwitch', 'OVSSwitch', 'OVSBridge', 'OVSSwitch', 'IVSSwitch', 'LinuxBridge', 'OVSSwitch'] -CONTROLLER_TYPES = ['Controller', 'OVSController', 'NOX', 'RemoteController', 'Ryu', 'DefaultController', 'NullController'] +HOST_TYPES = ["Host", "CPULimitedHost", "NAT", "EdgeNode", "LinuxRouter"] +SWITCH_TYPES = [ + "UserSwitch", + "OVSSwitch", + "OVSBridge", + "OVSSwitch", + "IVSSwitch", + "LinuxBridge", + "OVSSwitch", +] +CONTROLLER_TYPES = [ + "Controller", + "OVSController", + "NOX", + "RemoteController", + "Ryu", + "DefaultController", + "NullController", +] + def parse_ips(packet): eth_length = 14 eth_header = packet[:eth_length] - eth = unpack('!6s6sH', eth_header) + eth = unpack("!6s6sH", eth_header) eth_protocol = socket.ntohs(eth[2]) if eth_protocol == 8: - ip_header = packet[eth_length:20 + eth_length] - iph = unpack('!BBHHHBBH4s4s', ip_header) - s_addr = socket.inet_ntoa(iph[8]); - d_addr = socket.inet_ntoa(iph[9]); + ip_header = packet[eth_length : 20 + eth_length] + iph = unpack("!BBHHHBBH4s4s", ip_header) + s_addr = socket.inet_ntoa(iph[8]) + d_addr = socket.inet_ntoa(iph[9]) return s_addr, d_addr return None, None -class Sniffer(): + +class Sniffer: def __init__(self, net, output="netsim.pcap"): - self.output=output + self.output = output self.net = net self.nodes = [] @@ -37,61 +55,59 @@ def __init__(self, net, output="netsim.pcap"): self.TopoInfo() def start(self): - self.output_f = open(self.output, 'wb') - self.output_f_viz = open(self.output.replace('.pcap', '.viz.pcap'), 'wb') + self.output_f = open(self.output, "wb") + self.output_f_viz = open(self.output.replace(".pcap", ".viz.pcap"), "wb") self.kill = False - #Start siniffing packets on Mininet interfaces - 
self.snifferd = threading.Thread( target=self.sniff ) + # Start siniffing packets on Mininet interfaces + self.snifferd = threading.Thread(target=self.sniff) self.snifferd.daemon = True self.snifferd.start() def get_topoinfo(self): - return { - 'nodes': self.nodes, - 'interfaces': self.interfaces - } + return {"nodes": self.nodes, "interfaces": self.interfaces} def TopoInfo(self): - for item , value in self.net.items(): - node = {'name': item, 'type':value.__class__.__name__} - if node['type'] in CONTROLLER_TYPES: - node['ip'] = value.ip - node['port'] = value.port - elif node['type'] in SWITCH_TYPES: - node['dpid'] = value.dpid - elif node['type'] in HOST_TYPES: - node['ip'] = value.IP() + for item, value in self.net.items(): + node = {"name": item, "type": value.__class__.__name__} + if node["type"] in CONTROLLER_TYPES: + node["ip"] = value.ip + node["port"] = value.port + elif node["type"] in SWITCH_TYPES: + node["dpid"] = value.dpid + elif node["type"] in HOST_TYPES: + node["ip"] = value.IP() self.nodes.append(node) for intf in value.intfList(): - t_intf = str(intf.link).replace(intf.name,'').replace('<->','') - if t_intf != 'None': + t_intf = str(intf.link).replace(intf.name, "").replace("<->", "") + if t_intf != "None": self.interfaces.append( { - 'node': node['name'], - 'type': node['type'], - 'interface': intf.name, - 'mac': intf.mac, - 'ip':intf.ip, - 'link': t_intf - }) + "node": node["name"], + "type": node["type"], + "interface": intf.name, + "mac": intf.mac, + "ip": intf.ip, + "link": t_intf, + } + ) self.node_ips.add(intf.ip) - + def intfExists(self, interface, by_mac=False): for intf in self.interfaces: if by_mac: - if intf['mac'] == interface: + if intf["mac"] == interface: return intf elif intf["interface"] == interface: return intf return None - + def nodeExists(self, node): for n in self.nodes: - if n['name'] == node: + if n["name"] == node: return n return None - + def pkt_src_dest_rewrite(self, pkt, sip, smisnode, dip, dmisnode): # if smi ip != 
sip then rewrite sip # if dmi ip != dip then rewrite dip @@ -101,25 +117,25 @@ def pkt_src_dest_rewrite(self, pkt, sip, smisnode, dip, dmisnode): if not dip in self.node_ips: return pkt epkt = dpkt.ethernet.Ethernet(pkt) - if sip != smisnode['ip']: + if sip != smisnode["ip"]: ip = epkt.data - tip = ip_address(smisnode['ip']).packed + tip = ip_address(smisnode["ip"]).packed ip.src = tip - if dip != dmisnode['ip']: + if dip != dmisnode["ip"]: ip = epkt.data - tip = ip_address(dmisnode['ip']).packed + tip = ip_address(dmisnode["ip"]).packed ip.dst = tip return epkt - + def sniff(self): print("Starting sniffer") try: s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(0x0003)) except socket.error as msg: - print('Error creating socket:' + str(msg[0]) + ' | ' + msg[1]) + print("Error creating socket:" + str(msg[0]) + " | " + msg[1]) sys.exit() - pcapw=dpkt.pcap.Writer(self.output_f) - pcapw_viz=dpkt.pcap.Writer(self.output_f_viz) + pcapw = dpkt.pcap.Writer(self.output_f) + pcapw_viz = dpkt.pcap.Writer(self.output_f_viz) while True: if self.kill: break @@ -132,15 +148,15 @@ def sniff(self): continue except Exception as e: continue - + direction = "incoming" if packet[1][2] == socket.PACKET_OUTGOING: direction = "outgoing" packet = packet[0] - dstMAC = ':'.join('%02x' % b for b in packet[0:6]) - srcMAC = ':'.join('%02x' % b for b in packet[6:12]) + dstMAC = ":".join("%02x" % b for b in packet[0:6]) + srcMAC = ":".join("%02x" % b for b in packet[6:12]) srcMAC = str(srcMAC) dstMAC = str(dstMAC) @@ -150,7 +166,7 @@ def sniff(self): if not smi: print("smi not found", srcMAC) - + if not dmi: # print("dmi not found", dstMAC) continue @@ -158,20 +174,22 @@ def sniff(self): sip, dip = parse_ips(packet) link = self.intfExists(intf["link"]) - src, dst = intf["node"], intf["link"].split('-')[0] + src, dst = intf["node"], intf["link"].split("-")[0] if direction == "incoming": - src, dst = intf["link"].split('-')[0], intf["node"] + src, dst = intf["link"].split("-")[0], 
intf["node"] src_node = self.nodeExists(src) - smisnode = self.nodeExists(smi['node']) - dmisnode = self.nodeExists(dmi['node']) + smisnode = self.nodeExists(smi["node"]) + dmisnode = self.nodeExists(dmi["node"]) - - if not src_node['type'] in SWITCH_TYPES: - pcapw.writepkt(packet) - wpkt = self.pkt_src_dest_rewrite(packet, sip, smisnode, dip, dmisnode) - pcapw_viz.writepkt(wpkt) + if src_node: + if not src_node["type"] in SWITCH_TYPES: + pcapw.writepkt(packet) + wpkt = self.pkt_src_dest_rewrite( + packet, sip, smisnode, dip, dmisnode + ) + pcapw_viz.writepkt(wpkt) def close(self): if self.snifferd: @@ -179,4 +197,4 @@ def close(self): try: self.output_f.close() except Exception: - pass \ No newline at end of file + pass diff --git a/netsim/util.py b/netsim/util.py new file mode 100644 index 0000000..ee97378 --- /dev/null +++ b/netsim/util.py @@ -0,0 +1,45 @@ +import os + + +def logs_on_error(nodes, prefix, runner_id, code=1, message=None): + node_counts = {} + for node in nodes: + node_counts[node["name"]] = int(node["count"]) + for i in range(int(node["count"])): + node_name = "%s_%d" % (node["name"], i) + log_name = "logs/%s__%s_r%d.txt" % (prefix, node_name, runner_id) + if os.path.isfile(log_name): + print( + "\n################################################################" + ) + print("\n[INFO] Log file: %s" % log_name) + f = open(log_name, "r") + lines = f.readlines() + for line in lines: + print("[INFO][%s__%s] %s" % (prefix, node_name, line.rstrip())) + else: + print("[WARN] log file missing: %s" % log_name) + print("[ERROR] Process has failed with code:", code) + if message: + print("[ERROR] Message:", message) + + +def cleanup_tmp_dirs(temp_dirs): + for temp_dir in temp_dirs: + temp_dir.cleanup() + + +def eject(nodes, prefix, runner_id, temp_dirs): + logs_on_error(nodes, prefix, runner_id) + cleanup_tmp_dirs(temp_dirs) + raise Exception("Netsim run failed: %s" % prefix) + + +def print_route_table(net, runner_id): + router_name = "r0_" + str(runner_id) 
+ print("*** Routing Table on Router:\n") + print(net[router_name].cmd("route")) + print("*** r0 smcroute:\n") + print(net[router_name].cmd("smcroutectl -I smcroute-" + router_name + " show")) + print("*** Multicast ping:\n") + print(net["zbox1-r" + str(runner_id)].cmd("ping -c 3 239.0.0.1"))