Commit 327357b7 authored by Matthew Johnson

Import assignment contents to version control

parent becac089
Name:
UWNetID:
Name:
UWNetID:
Instructions to reproduce your results:
TODO
Answers to the questions:
Part 2
1. TODO
2. TODO
3. TODO
4. TODO
5. TODO
Part 3
1. TODO
2. TODO
3. TODO
4. TODO
from mininet.topo import Topo
from mininet.node import CPULimitedHost
from mininet.link import TCLink
from mininet.net import Mininet
from mininet.log import lg, info
from mininet.util import dumpNodeConnections
from mininet.cli import CLI
from subprocess import Popen, PIPE
from time import sleep, time
from multiprocessing import Process
from argparse import ArgumentParser
from monitor import monitor_qlen
import sys
import os
import math
parser = ArgumentParser(description="Bufferbloat tests")
parser.add_argument(
"--bw-host", "-B", type=float, help="Bandwidth of host links (Mb/s)", default=1000
)
parser.add_argument(
"--bw-net",
"-b",
type=float,
help="Bandwidth of bottleneck (network) link (Mb/s)",
required=True,
)
parser.add_argument(
"--delay", type=float, help="Link propagation delay (ms)", required=True
)
parser.add_argument("--dir", "-d", help="Directory to store outputs", required=True)
parser.add_argument(
"--time", "-t", help="Duration (sec) to run the experiment", type=int, default=10
)
parser.add_argument(
"--maxq",
type=int,
help="Max buffer size of network interface in packets",
default=100,
)
# Linux uses TCP CUBIC by default, which doesn't have the usual sawtooth
# behaviour. For those who are curious, invoke this script with
# --cong cubic and see what happens...
# sysctl -a | grep cong should list some interesting parameters.
parser.add_argument(
"--cong", help="Congestion control algorithm to use", default="reno"
)
# Expt parameters
args = parser.parse_args()
class BBTopo(Topo):
"Simple topology for bufferbloat experiment."
def build(self, n=2):
# TODO: create two hosts
# Here I have created a switch. If you change its name, its
# interface names will change from s0-eth1 to newname-eth1.
switch = self.addSwitch("s0")
# TODO: Add links with appropriate characteristics
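# A possible sketch, commented out so the TODO stays yours to complete.
# It assumes two hosts named h1 and h2 and reuses the parameters parsed
# above; which link carries the delay and the maxq queue limit depends
# on the topology you intend.
# h1 = self.addHost("h1")
# h2 = self.addHost("h2")
# self.addLink(h1, switch, bw=args.bw_host)
# self.addLink(switch, h2, bw=args.bw_net, delay="%fms" % args.delay,
#              max_queue_size=args.maxq)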
# Simple wrappers around monitoring utilities. You are welcome to
# contribute neatly written (using classes) monitoring scripts for
# Mininet!
def start_iperf(net):
h1 = net.get("h1")
h2 = net.get("h2")
print("Starting iperf server...")
# For those who are curious about the -w 16m parameter, it ensures
# that the TCP flow is not receiver window limited. If it is,
# there is a chance that the router buffer may not get filled up.
server = h2.popen("iperf -s -w 16m")
# TODO: Start the iperf client on h1. Ensure that you create a
# long lived TCP flow.
# client = ...
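# A possible sketch, commented out; it assumes the h1/h2 names above and
# keeps the flow alive for the whole experiment window (args.time):
# client = h1.popen(
#     "iperf -c %s -t %d -w 16m" % (h2.IP(), args.time + 10), shell=True
# )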
def start_qmon(iface, interval_sec=0.1, outfile="q.txt"):
monitor = Process(target=monitor_qlen, args=(iface, interval_sec, outfile))
monitor.start()
return monitor
def start_ping(net):
# TODO: Start a ping train from h1 to h2 (or h2 to h1, does it
# matter?) Measure RTTs every 0.1 second. Read the ping man page
# to see how to do this.
# Hint: Use host.popen(cmd, shell=True). If you pass shell=True
# to popen, you can redirect cmd's output using shell syntax.
# e.g. ping ... > /path/to/ping.
pass
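# A possible sketch, commented out; it assumes h1 pings h2 and that the
# output should land in a file under args.dir:
# h1 = net.get("h1")
# h2 = net.get("h2")
# return h1.popen(
#     "ping -i 0.1 %s > %s/ping.txt" % (h2.IP(), args.dir), shell=True
# )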
def start_webserver(net):
h1 = net.get("h1")
proc = h1.popen("python webserver.py", shell=True)
sleep(1)
return [proc]
def bufferbloat():
if not os.path.exists(args.dir):
os.makedirs(args.dir)
os.system("sysctl -w net.ipv4.tcp_congestion_control=%s" % args.cong)
topo = BBTopo()
net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink)
net.start()
# This dumps the topology and how nodes are interconnected through
# links.
dumpNodeConnections(net.hosts)
# This performs a basic all pairs ping test.
net.pingAll()
# TODO: Start monitoring the queue sizes. Since the switch I
# created is "s0", I monitor one of the interfaces. Which
# interface? The interface numbering starts with 1 and increases.
# Depending on the order you add links to your network, this
# number may be 1 or 2. Ensure you use the correct number.
qmon = start_qmon(iface="s0-eth2", outfile="%s/q.txt" % (args.dir))
# TODO: Start iperf, webservers, etc.
# start_iperf(net)
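# e.g. (commented out; the exact set and order of helpers is up to you):
# start_iperf(net)
# start_webserver(net)
# start_ping(net)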
# TODO: measure the time it takes to complete webpage transfer
# from h1 to h2 (say) 3 times. Hint: check what the following
# command does: curl -o /dev/null -s -w %{time_total} google.com
# Now use the curl command to fetch webpage from the webserver you
# spawned on host h1 (not from google!)
# Hint: Verify the url by running your curl command without the
# flags. The html webpage should be returned as the response.
# Hint: have a separate function to do this and you may find the
# loop below useful.
start_time = time()
while True:
# do the measurement (say) 3 times.
sleep(5)
now = time()
delta = now - start_time
if delta > args.time:
break
print("%.1fs left..." % (args.time - delta))
# TODO: compute average (and standard deviation) of the fetch
# times. You don't need to plot them. Just note it in your
# README and explain.
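# A possible sketch, commented out; it assumes h2 fetches the page that
# webserver.py serves on h1 (verify the exact URL/port with a plain curl
# first, as the hint above suggests). Define such a helper before the
# loop above if you call it from there.
# def fetch_webpage(net):
#     h1 = net.get("h1")
#     h2 = net.get("h2")
#     p = h2.popen(
#         "curl -o /dev/null -s -w %%{time_total} http://%s/" % h1.IP(),
#         shell=True,
#     )
#     out, _ = p.communicate()
#     return float(out.decode().strip())
# The fetch times can then be summarised with the avg()/stdev() helpers
# in helper.py (which you would need to import here).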
# Hint: The command below invokes a CLI which you can use to
# debug. It allows you to run arbitrary commands inside your
# emulated hosts h1 and h2.
# CLI(net)
qmon.terminate()
net.stop()
# Ensure that all processes you create within Mininet are killed.
# Sometimes they require manual killing.
Popen("pgrep -f webserver.py | xargs kill -9", shell=True).wait()
if __name__ == "__main__":
bufferbloat()
"""
Helper module for the plot scripts.
"""
import re
import itertools
import matplotlib as m
import os
m.use("Agg")
import matplotlib.pyplot as plt
import argparse
import math
def read_list(fname, delim=","):
ret = []
with open(fname) as lines:
for l in lines:
ls = l.strip().split(delim)
# Treat empty fields and bare "ms"/"s" unit tokens as zero.
ls = ["0" if e.strip() in ("", "ms", "s") else e for e in ls]
ret.append(ls)
return ret
def ewma(alpha, values):
if alpha == 0:
return values
ret = []
prev = 0
for v in values:
prev = alpha * prev + (1 - alpha) * v
ret.append(prev)
return ret
def col(n, obj=None, clean=lambda e: e):
"""A versatile column extractor.
col(n, [1,2,3]) => returns the nth value in the list
col(n, [ [...], [...], ... ]) => returns the nth column in this matrix
col('blah', { ... }) => returns the blah-th value in the dict
col(n) => partial function, useful in maps
"""
if obj is None:
def f(item):
return clean(item[n])
return f
if type(obj) == type([]):
if len(obj) > 0 and (type(obj[0]) == type([]) or type(obj[0]) == type({})):
return map(col(n, clean=clean), obj)
if type(obj) == type([]) or type(obj) == type({}):
try:
return clean(obj[n])
except:
# print(T.colored('col(...): column "%s" not found!' % (n), 'red'))
return None
# We wouldn't know what to do here, so just return None
# print(T.colored('col(...): column "%s" not found!' % (n), 'red'))
return None
def transpose(l):
return zip(*l)
def avg(lst):
return sum(map(float, lst)) / len(lst)
def stdev(lst):
mean = avg(lst)
var = avg(list(map(lambda e: (e - mean) ** 2, lst)))
return math.sqrt(var)
def xaxis(values, limit):
l = len(values)
return zip(*map(lambda p: (p[0] * 1.0 * limit / l, p[1]), enumerate(values)))
def grouper(n, iterable, fillvalue=None):
"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return itertools.zip_longest(fillvalue=fillvalue, *args)
def cdf(values):
values.sort()
prob = 0
l = len(values)
x, y = [], []
for v in values:
prob += 1.0 / l
x.append(v)
y.append(prob)
return (x, y)
def parse_cpu_usage(fname, nprocessors=8):
"""Returns (user,system,nice,iowait,hirq,sirq,steal) tuples
aggregated over all processors. DOES NOT RETURN IDLE times."""
data = grouper(nprocessors, open(fname).readlines())
"""Typical line looks like:
Cpu0 : 0.0%us, 1.0%sy, 0.0%ni, 97.0%id, 0.0%wa, 0.0%hi, 2.0%si, 0.0%st
"""
ret = []
for collection in data:
total = [0] * 8
for cpu in collection:
usages = cpu.split(":")[1]
usages = [e.split("%")[0] for e in usages.split(",")]
for i in range(len(usages)):
total[i] += float(usages[i])
total = [t / nprocessors for t in total]
# Skip idle time
ret.append(total[0:3] + total[4:])
return ret
def pc95(lst):
l = len(lst)
return sorted(lst)[int(0.95 * l)]
def pc99(lst):
l = len(lst)
return sorted(lst)[int(0.99 * l)]
def coeff_variation(lst):
return stdev(lst) / avg(lst)
from time import sleep, time
from subprocess import *
import re
default_dir = "."
def monitor_qlen(iface, interval_sec=0.01, fname="%s/qlen.txt" % default_dir):
pat_queued = re.compile(rb"backlog\s[^\s]+\s([\d]+)p")
cmd = "tc -s qdisc show dev %s" % (iface)
ret = []
open(fname, "w").write("")
while 1:
p = Popen(cmd, shell=True, stdout=PIPE)
output = p.stdout.read()
# Not quite right, but will do for now
matches = pat_queued.findall(output)
if matches and len(matches) > 1:
ret.append(matches[1])
t = "%f" % time()
open(fname, "a").write("{},{}\n".format(t, matches[1].decode("utf-8")))
sleep(interval_sec)
# open('qlen.txt', 'w').write('\n'.join(ret))
return
def monitor_devs_ng(fname="%s/txrate.txt" % default_dir, interval_sec=0.01):
"""Uses bwm-ng tool to collect iface tx rate stats. Very reliable."""
cmd = "sleep 1; bwm-ng -t %s -o csv " "-u bits -T rate -C ',' > %s" % (
interval_sec * 1000,
fname,
)
Popen(cmd, shell=True).wait()
"""
Matplotlib parameters to create pretty plots
"""
from matplotlib import rc, rcParams
DEF_AXIS_LEFT = 0.15
DEF_AXIS_RIGHT = 0.95
DEF_AXIS_BOTTOM = 0.1
DEF_AXIS_TOP = 0.95
DEF_AXIS_WIDTH = DEF_AXIS_RIGHT - DEF_AXIS_LEFT
DEF_AXIS_HEIGHT = DEF_AXIS_TOP - DEF_AXIS_BOTTOM
# add_axes takes [left, bottom, width, height]
DEF_AXES = [DEF_AXIS_LEFT, DEF_AXIS_BOTTOM, DEF_AXIS_WIDTH, DEF_AXIS_HEIGHT]
AXIS_2Y_RIGHT = 0.8
AXIS_2Y_WIDTH = AXIS_2Y_RIGHT - DEF_AXIS_LEFT
AXES_2Y = [DEF_AXIS_LEFT, DEF_AXIS_BOTTOM, AXIS_2Y_WIDTH, DEF_AXIS_HEIGHT]
AXES_LABELSIZE = 24
TICK_LABELSIZE = 24
TEXT_LABELSIZE = 24
COLOR_LIGHTGRAY = "#cccccc"
# COLOR_HLINES = '#606060'
COLOR_HLINES = "black"
HLINE_LABELSIZE = 24
HLINE_LINEWIDTH = 2
rc("axes", **{"labelsize": "large", "titlesize": "large", "grid": True})
rc("legend", **{"fontsize": "xx-large"})
rcParams["axes.labelsize"] = AXES_LABELSIZE
rcParams["xtick.labelsize"] = TICK_LABELSIZE
rcParams["ytick.labelsize"] = TICK_LABELSIZE
rcParams["xtick.major.pad"] = 4
rcParams["ytick.major.pad"] = 6
rcParams["figure.subplot.top"] = DEF_AXIS_TOP
rcParams["figure.subplot.bottom"] = DEF_AXIS_BOTTOM
rcParams["figure.subplot.left"] = DEF_AXIS_LEFT
rcParams["figure.subplot.right"] = DEF_AXIS_RIGHT
rcParams["lines.linewidth"] = 2
rcParams["grid.color"] = COLOR_LIGHTGRAY
rcParams["grid.linewidth"] = 0.6
rcParams["ps.useafm"] = True
rcParams["pdf.use14corefonts"] = True
# rcParams['text.usetex'] = True
def quarter_size():
QUARTER_AXIS_LEFT = 0.25
QUARTER_AXIS_RIGHT = 0.92
QUARTER_AXIS_BOTTOM = 0.20
QUARTER_AXIS_TOP = 0.95
QUARTER_AXIS_WIDTH = QUARTER_AXIS_RIGHT - QUARTER_AXIS_LEFT
QUARTER_AXIS_HEIGHT = QUARTER_AXIS_TOP - QUARTER_AXIS_BOTTOM
QUARTER_AXES_LABELSIZE = 40
QUARTER_TICK_LABELSIZE = 40
QUARTER_TEXT_LABELSIZE = 40
rc("axes", **{"labelsize": "xx-large", "titlesize": "xx-large", "grid": True})
rc("legend", **{"fontsize": "xx-large"})
rcParams["axes.labelsize"] = QUARTER_AXES_LABELSIZE
rcParams["xtick.labelsize"] = QUARTER_TICK_LABELSIZE
rcParams["ytick.labelsize"] = QUARTER_TICK_LABELSIZE
rcParams["xtick.major.pad"] = 16
rcParams["ytick.major.pad"] = 20
rcParams["figure.subplot.top"] = QUARTER_AXIS_TOP
rcParams["figure.subplot.bottom"] = QUARTER_AXIS_BOTTOM
rcParams["figure.subplot.left"] = QUARTER_AXIS_LEFT
rcParams["figure.subplot.right"] = QUARTER_AXIS_RIGHT
"""
Plot ping RTTs over time
"""
from helper import *
import plot_defaults
from matplotlib.ticker import MaxNLocator
from pylab import figure
parser = argparse.ArgumentParser()
parser.add_argument(
"--files",
"-f",
help="Ping output files to plot",
required=True,
action="store",
nargs="+",
)
parser.add_argument(
"--freq", help="Frequency of pings (per second)", type=int, default=10
)
parser.add_argument(
"--out", "-o", help="Output png file for the plot.", default=None
) # Will show the plot
args = parser.parse_args()
def parse_ping(fname):
ret = []
lines = open(fname).readlines()
num = 0
for line in lines:
if "bytes from" not in line:
continue
try:
rtt = line.split(" ")[-2]
rtt = rtt.split("=")[1]
rtt = float(rtt)
ret.append([num, rtt])
num += 1
except:
break
return ret
m.rc("figure", figsize=(16, 6))
fig = figure()
ax = fig.add_subplot(111)
for i, f in enumerate(args.files):
data = parse_ping(f)
xaxis = list(map(float, list(col(0, data))))
start_time = xaxis[0]
xaxis = list(map(lambda x: (x - start_time) / args.freq, xaxis))
qlens = list(map(float, col(1, data)))
ax.plot(xaxis, qlens, lw=2)
ax.xaxis.set_major_locator(MaxNLocator(4))
plt.ylabel("RTT (ms)")
plt.grid(True)
if args.out:
plt.savefig(args.out)
else:
plt.show()
"""
Plot queue occupancy over time
"""
from helper import *
import plot_defaults
from matplotlib.ticker import MaxNLocator
from pylab import figure
parser = argparse.ArgumentParser()
parser.add_argument(
"--files",
"-f",
help="Queue timeseries output to one plot",
required=True,
action="store",
nargs="+",
dest="files",
)
parser.add_argument(
"--legend",
"-l",
help="Legend to use if there are multiple plots. File names used as default.",
action="store",
nargs="+",
default=None,
dest="legend",
)
parser.add_argument(
"--out",
"-o",
help="Output png file for the plot.",
default=None, # Will show the plot
dest="out",
)
parser.add_argument(
"--labels",
help="Labels for x-axis if summarising; defaults to file names",
required=False,
default=[],
nargs="+",
dest="labels",
)
parser.add_argument(
"--every",
help="If the plot has a lot of data points, plot one of every EVERY (x,y) point (default 1).",
default=1,
type=int,
)
args = parser.parse_args()
if args.legend is None:
args.legend = []
for file in args.files:
args.legend.append(file)
to_plot = []
def get_style(i):
if i == 0:
return {"color": "red"}
else:
return {"color": "black", "ls": "-."}
m.rc("figure", figsize=(16, 6))
fig = figure()
ax = fig.add_subplot(111)
for i, f in enumerate(args.files):
data = read_list(f)
xaxis = list(map(float, list(col(0, data))))
start_time = xaxis[0]
xaxis = list(map(lambda x: x - start_time, xaxis))
qlens = list(map(float, col(1, data)))
xaxis = xaxis[:: args.every]
qlens = qlens[:: args.every]
ax.plot(xaxis, qlens, label=args.legend[i], lw=2, **get_style(i))
ax.xaxis.set_major_locator(MaxNLocator(4))
plt.ylabel("Packets")
plt.grid(True)
plt.xlabel("Seconds")
if args.out:
print("saving to", args.out)
plt.savefig(args.out)
else:
plt.show()
#!/usr/bin/env bash
# Note: Mininet must be run as root. So invoke this shell script
# using sudo.
time=90