Simulating congestion control in Mininet

  • Check which congestion control algorithms the operating system currently allows:
cat /proc/sys/net/ipv4/tcp_allowed_congestion_control
  • Configure the congestion control algorithm on a socket:
int set_congestion_type(int fd, char *cc){
    char optval[TCP_CC_NAME_MAX];
    memset(optval, 0, TCP_CC_NAME_MAX);
    strncpy(optval, cc, TCP_CC_NAME_MAX - 1); /* keep the name null-terminated */
    int length = strlen(optval) + 1;
    int rc = setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, (void*)optval, length);
    if(rc != 0){
        printf("cc %s is not supported\n", cc);
    }
    return rc;
}
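
 A minimal usage sketch (the algorithm name "bbr" is only an example and must appear in tcp_allowed_congestion_control; TCP_CONGESTION can also be read back with getsockopt to confirm what is actually in effect):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

#ifndef TCP_CC_NAME_MAX
#define TCP_CC_NAME_MAX 16   /* value used by the kernel; not exported by every libc header */
#endif

int main(void){
    int fd = socket(AF_INET, SOCK_STREAM, 0);
    if(set_congestion_type(fd, "bbr") == 0){
        char name[TCP_CC_NAME_MAX] = {0};
        socklen_t len = sizeof(name);
        /* read back the congestion control algorithm now attached to the socket */
        getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, &len);
        printf("congestion control in use: %s\n", name);
    }
    return 0;
}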

 Building the Mininet simulation topology:

#!/usr/bin/python
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.cli import CLI
from mininet.link import TCLink
import time
#    ___r1____
#   /          \0  1
# h1            r3---h2
#             
#
bottleneckbw=4
nonbottlebw=500
max_queue_size=bottleneckbw*1000*100//(1500*8)  # about 100 ms of the bottleneck rate, in packets
net = Mininet( cleanup=True )
h1 = net.addHost('h1',ip='10.0.1.1')
r1 = net.addHost('r1',ip='10.0.1.2')
r3 = net.addHost('r3',ip='10.0.5.1')
h2 = net.addHost('h2',ip='10.0.5.2')
c0 = net.addController('c0')
net.addLink(h1,r1,intfName1='h1-eth0',intfName2='r1-eth0',cls=TCLink , bw=nonbottlebw, delay='20ms', max_queue_size=max_queue_size)
net.addLink(r1,r3,intfName1='r1-eth1',intfName2='r3-eth0',cls=TCLink , bw=bottleneckbw, delay='20ms', max_queue_size=max_queue_size)
net.addLink(r3,h2,intfName1='r3-eth1',intfName2='h2-eth0',cls=TCLink , bw=nonbottlebw, delay='10ms', max_queue_size=max_queue_size)
net.build()
h1.cmd("ifconfig h1-eth0 10.0.1.1/24")
h1.cmd("route add default gw 10.0.1.2 dev h1-eth0")

r1.cmd("ifconfig r1-eth0 10.0.1.2/24")
r1.cmd("ifconfig r1-eth1 10.0.2.1/24")
r1.cmd("ip route add to 10.0.1.0/24 via 10.0.1.1")
r1.cmd("ip route add to 10.0.2.0/24 via 10.0.2.2")
r1.cmd("ip route add to 10.0.5.0/24 via 10.0.2.2")
r1.cmd('sysctl net.ipv4.ip_forward=1')

r3.cmd("ifconfig r3-eth0 10.0.2.2/24")
r3.cmd("ifconfig r3-eth1 10.0.5.1/24")
r3.cmd("ip route add to 10.0.1.0/24 via 10.0.2.1")
r3.cmd("ip route add to 10.0.2.0/24 via 10.0.2.1")
r3.cmd("ip route add to 10.0.5.0/24 via 10.0.5.2")
r3.cmd('sysctl net.ipv4.ip_forward=1')

h2.cmd("ifconfig h2-eth0 10.0.5.2/24")
h2.cmd("route add default gw 10.0.5.1")

net.start()
time.sleep(1)
CLI(net)
net.stop()
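
 The max_queue_size above is simply the number of 1500-byte packets the bottleneck link transmits in about 100 ms, i.e. roughly one bandwidth-delay product for a 100 ms round trip:

\text{max\_queue\_size}=\frac{\text{bw}\,[\mathrm{Mbps}]\times1000\times100\,\mathrm{ms}}{1500\times8\,\mathrm{bit}}=\frac{4\times1000\times100}{12000}\approx 33\ \text{packets}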

 Start two TCP flows on h1, each transferring 100 MB of data to the server side. Every 5 seconds the server prints how much data it has received so far, in the format id time length.
 Extract each client's sending rate from the log. Script:

#!/usr/bin/python
delimiter="_"

class Client:
    def __init__(self,prefix,id):
        self.id=id
        name=prefix+delimiter+str(id)+".txt"
        self.fout=open(name,'w')
        self.samples=0
        self.last_time=0
        self.bytes=0
    def __del__(self):
        self.fout.close()
    def OnNewSample(self,id,ts,length):
        if id!=self.id:
            return
        sec=float(ts)/1000
        if self.samples==0:
            self.fout.write(str(sec)+"\t"+str(0)+"\n")
        else:
            previous_bytes=self.bytes
            inc_bytes=float(length-previous_bytes)
            inc_time=(ts-self.last_time)*1000
            if ts>self.last_time:
                rate=inc_bytes*8/inc_time  # ts is in ms, so this comes out in Mbps
                self.fout.write(str(sec)+"\t"+str(rate)+"\n")
        self.samples=self.samples+1
        self.last_time=ts
        self.bytes=length

prefix="tcp_client_rate"
clients={}
log_in="server_log.txt"
# each log line: id  ts  length
with open(log_in) as txtData:
    for line in txtData.readlines():
        lineArr=line.strip().split()
        id=lineArr[0]
        time=int(lineArr[1])
        length=int(lineArr[2])
        if id in clients:
            clients[id].OnNewSample(id,time,length)
        else:
            client=Client(prefix,id)
            clients[id]=client
            client.OnNewSample(id,time,length)
clients.clear()
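
 The rate written out by the script follows directly from the units: ts is in milliseconds, so

\text{rate}\,[\mathrm{Mbps}]=\frac{\Delta\text{bytes}\times 8}{\Delta t\,[\mathrm{ms}]\times 1000}

 For example, 1.25 MB received over one 5-second reporting interval gives 10,000,000 bits / (5000 ms × 1000) = 2 Mbps.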

 Plotting:

#! /bin/sh
file1=tcp_client_rate_1.txt
file2=tcp_client_rate_2.txt
output=tcp
gnuplot<<!
set xlabel "time/s" 
set ylabel "rate/Mbps"
set xrange [0:300]
set yrange [0:4]
set term "png"
set output "${output}_bw.png"
plot "${file1}" u 1:2 title "flow1" with lines lw 2,\
"${file2}" u 1:2 title "flow2" with lines lw 2
set output
exit
!

 Later, when testing, I found the bandwidth only fluctuated between 2 and 3 Mbps. I raised the link bandwidth to 32 Mbps with 4 flows sharing it, so each flow's theoretical share should be 8 Mbps. I suspected the TCP send buffer, so I added code to configure the buffer sizes.
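 The suspicion makes sense: the send buffer caps how much unacknowledged data can be in flight, and with this topology's round-trip time of roughly 100 ms (2×(20+20+10) ms of propagation, plus queueing), sustaining 8 Mbps needs at least about
\text{buffer}\geq \text{rate}\times RTT=\frac{8\,\mathrm{Mbps}\times 0.1\,\mathrm{s}}{8}=100\,\mathrm{KB}
 of outstanding data in the sender, and in practice more, since Linux also counts per-packet bookkeeping against the buffer.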

void TcpClient::SetSendBufSize(int len){
    if(sockfd_<0){
        return ;
    }   
    int nSndBufferLen =len;
    int nLen          = sizeof(int);
    setsockopt(sockfd_, SOL_SOCKET, SO_SNDBUF, (char*)&nSndBufferLen, nLen);
}
void TcpClient::SetRecvBufSize(int len){
    if(sockfd_<0){
        return ;
    }
    int nRcvBufferLen =len;
    int nLen          = sizeof(int);
    setsockopt(sockfd_, SOL_SOCKET, SO_RCVBUF, (char*)&nRcvBufferLen, nLen);
}
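
 Note that Linux doubles the value passed to SO_SNDBUF/SO_RCVBUF (to account for bookkeeping overhead) and clamps it at net.core.wmem_max / net.core.rmem_max, so it is worth reading back what the kernel actually granted. A small sketch (the helper is illustrative, not part of the original TcpClient):

#include <stdio.h>
#include <sys/socket.h>

/* print the send-buffer size the kernel actually granted for fd */
void PrintEffectiveSndBuf(int fd){
    int effective = 0;
    socklen_t len = sizeof(effective);
    if(getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &effective, &len) == 0){
        /* the returned value is roughly twice what was requested, capped by net.core.wmem_max */
        printf("effective SO_SNDBUF: %d bytes\n", effective);
    }
}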

 After testing, the problem remained:
(figure: throughput still below the theoretical rate)
 The cause was here:
void TcpClient::NotifiWrite(){
    if(sendByte_<totalByte_){
        uint32_t millis=random_.nextInt(0,100);
        NextWriteEvent(millis);
    }
}
void TcpClient::NextWriteEvent(int millis){
    struct timeval tv;
    struct event_base *evb=thread_->getEventBase();
    event_assign(&write_event_, evb, -1, 0, WriteEventCallback, (void*)this);
    evutil_timerclear(&tv);
    tv.tv_sec = millis/1000;
    tv.tv_usec=(millis%1000)*1000;
    event_add(&write_event_, &tv);
}
 Here I send 10 packets each time, with a random gap of 0-100 ms (50 ms on average) before the next send. The expected bandwidth is then:
B=\frac{1500\,\mathrm{bytes}\times 10\times 8}{50\,\mathrm{ms}}\approx 2.4\,\mathrm{Mbps}

 After that change the bandwidth improved, but it was still some way from the theoretical value. With iperf and the default socket buffers, TCP easily reached the full link bandwidth, so there was still a bug somewhere. I suspected libevent, which handles the data reads at the application layer, but I could not be bothered to track down exactly where the problem was introduced. Instead I pulled the asynchronous I/O library out of redis, so the code no longer depends on libevent. After testing, block_client quickly reached the maximum bandwidth. The code took from the 22nd to the 25th to write.
 Test result: bottleneck link bandwidth 20 Mbps, four flows in total.
(figure: per-flow throughput, 20 Mbps bottleneck, four flows)
 RTT-unfairness test for congestion control. Simulation topology:

#!/usr/bin/python
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.link import TCLink
from mininet.cli import CLI
import time
import datetime
import subprocess
import os,signal
import sys
#    ___r1____
#   /          \0  1
# h1            r3---h2
#  \           /2
#   ---r2-----
bottleneckbw=20
nonbottlebw=500
max_rtt=300
bottleneckQ=bottleneckbw*1000*max_rtt//(1500*8)      # max_rtt ms of the respective link rate, in packets
nonbottleneckQ=nonbottlebw*1000*max_rtt//(1500*8)
net = Mininet( cleanup=True )
h1 = net.addHost('h1',ip='10.0.1.1')
r1 = net.addHost('r1',ip='10.0.1.2')
r2 = net.addHost('r2',ip='10.0.3.2')
r3 = net.addHost('r3',ip='10.0.5.1')
h2 = net.addHost('h2',ip='10.0.5.2')
c0 = net.addController('c0')
net.addLink(h1,r1,intfName1='h1-eth0',intfName2='r1-eth0',cls=TCLink , bw=nonbottlebw, delay='10ms', max_queue_size=nonbottleneckQ)
net.addLink(r1,r3,intfName1='r1-eth1',intfName2='r3-eth0',cls=TCLink , bw=nonbottlebw, delay='10ms', max_queue_size=nonbottleneckQ)
net.addLink(r3,h2,intfName1='r3-eth1',intfName2='h2-eth0',cls=TCLink , bw=bottleneckbw, delay='20ms', max_queue_size=bottleneckQ)
net.addLink(h1,r2,intfName1='h1-eth1',intfName2='r2-eth0',cls=TCLink , bw=nonbottlebw, delay='20ms', max_queue_size=nonbottleneckQ)
net.addLink(r2,r3,intfName1='r2-eth1',intfName2='r3-eth2',cls=TCLink , bw=nonbottlebw, delay='30ms', max_queue_size=nonbottleneckQ)

net.build()

h1.cmd("ifconfig h1-eth0 10.0.1.1/24")
h1.cmd("ifconfig h1-eth1 10.0.3.1/24")
h1.cmd("ip route flush all proto static scope global")
h1.cmd("ip route add 10.0.1.1/24 dev h1-eth0 table 5000")
h1.cmd("ip route add default via 10.0.1.2 dev h1-eth0 table 5000")

h1.cmd("ip route add 10.0.3.1/24 dev h1-eth1 table 5001")
h1.cmd("ip route add default via 10.0.3.2 dev h1-eth1 table 5001")
h1.cmd("ip rule add from 10.0.1.1 table 5000")
h1.cmd("ip rule add from 10.0.3.1 table 5001")
h1.cmd("ip route add default gw 10.0.1.2  dev h1-eth0")
#that be a must or else a tcp client would not know how to route packet out
h1.cmd("route add default gw 10.0.1.2  dev h1-eth0") #would not work for the second part when a tcp client bind a address


r1.cmd("ifconfig r1-eth0 10.0.1.2/24")
r1.cmd("ifconfig r1-eth1 10.0.2.1/24")
r1.cmd("ip route add to 10.0.1.0/24 via 10.0.1.1")
r1.cmd("ip route add to 10.0.2.0/24 via 10.0.2.2")
r1.cmd("ip route add to 10.0.5.0/24 via 10.0.2.2")
r1.cmd('sysctl net.ipv4.ip_forward=1')

r3.cmd("ifconfig r3-eth0 10.0.2.2/24")
r3.cmd("ifconfig r3-eth1 10.0.5.1/24")
r3.cmd("ifconfig r3-eth2 10.0.4.2/24")
r3.cmd("ip route add to 10.0.1.0/24 via 10.0.2.1")
r3.cmd("ip route add to 10.0.2.0/24 via 10.0.2.1")
r3.cmd("ip route add to 10.0.5.0/24 via 10.0.5.2")
r3.cmd("ip route add to 10.0.4.0/24 via 10.0.4.1")
r3.cmd("ip route add to 10.0.3.0/24 via 10.0.4.1")
r3.cmd('sysctl net.ipv4.ip_forward=1')

r2.cmd("ifconfig r2-eth0 10.0.3.2/24")
r2.cmd("ifconfig r2-eth1 10.0.4.1/24")
r2.cmd("ip route add to 10.0.3.0/24 via 10.0.3.1")
r2.cmd("ip route add to 10.0.4.0/24 via 10.0.4.2")
r2.cmd("ip route add to 10.0.5.0/24 via 10.0.4.2")
r2.cmd('sysctl net.ipv4.ip_forward=1')

h2.cmd("ifconfig h2-eth0 10.0.5.2/24")
h2.cmd("route add default gw 10.0.5.1")
#ping -I src dst
net.start()
time.sleep(1)
CLI(net)
net.stop()
print "stop"

 Path p1, from h1 (10.0.1.1) to h2, carries 25 TCP connections numbered 1-25; path p2, from h1 (10.0.3.1) to h2, carries 25 TCP connections numbered 26-50.
 All flows use the BBR algorithm. BBR's RTT bias is quite visible: the flows traversing p2, the path with the larger propagation delay, obtain more throughput.
(figure: per-flow throughput, all flows BBR)
 All flows using the Reno algorithm:
(figure: per-flow throughput, all flows Reno)
 All flows using the Cubic algorithm:
(figure: per-flow throughput, all flows Cubic)
 Another finding: with the socket in non-blocking mode, a call to write() is not guaranteed to write everything. At the point marked 1 in the code below, written_bytes < len can occur.

int TcpClient::WriteMessage(const char *msg, int len){
    int written_bytes=0;
    if(sockfd_<=0){
        return written_bytes;
    }
    //1
    written_bytes=write(sockfd_,msg,len);
    if(written_bytes<=0){
        if(errno == EWOULDBLOCK || errno == EAGAIN){
            written_bytes=0;
        }else{
            written_bytes=0;
            LOG(INFO)<<"write error "<<errno;
            CHECK(0);
        }
    }
    return written_bytes;
}
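
 Because of this, the caller has to treat a short write as normal: keep an offset of what has already been sent and retry the remainder the next time the socket becomes writable. A minimal sketch of that pattern (the helper and its bookkeeping are illustrative, not part of the original client):

#include <errno.h>
#include <unistd.h>

/* try to flush buf[*sent, total); returns 1 when everything is out, 0 if the socket
 * is full and the rest must wait for the next writable event, -1 on a real error */
int FlushPending(int fd, const char *buf, int total, int *sent){
    while(*sent < total){
        ssize_t n = write(fd, buf + *sent, total - *sent);
        if(n > 0){
            *sent += (int)n;               /* partial write: advance the offset and keep going */
        }else if(n < 0 && (errno == EWOULDBLOCK || errno == EAGAIN)){
            return 0;                      /* kernel send buffer full for now */
        }else{
            return -1;
        }
    }
    return 1;
}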

 The code is available for download [1].
[1] tcp-congestion-mininet
