SORT Multi-Object Tracking: Code Walkthrough

Overall algorithm flow:
1. Read the detection results det for each frame. det.txt looks like the lines below: the first number is the frame index, the third through sixth numbers are the target box (x, y, w, h), and the seventh number is the detection score; the meaning of the remaining fields is unclear (SORT does not use them). A minimal loading sketch is shown right after this flow list.

1,-1,500,158,30.979,70.299,93.673,-3.70694,-7.16689,0
1,-1,246,218,40.258,91.355,69.358,-11.4773,-5.53043,0
1,-1,648,238,36.706,83.294,55.955,-8.82797,-12.7447,0

The data extracted from each frame is:

x,    y,     w,        h,        score
500,  158,   30.979,   70.299,   93.673
246,  218,   40.258,   91.355,   69.358
648,  238,   36.706,   83.294,   55.955

2. The SORT update loop
1) For each tracker carried over from the previous frame, first run one prediction step in the current frame.
2) Trackers whose prediction contains invalid values (NaN) are recorded in the to_del list.
3) Mask the tracker rows containing invalid values and compress the array, i.e. drop those rows entirely.
4) Delete the invalid trackers from self.trackers, iterating in reverse so that the indices stay valid.
5) Assign the detections to the trackers. This yields three lists:
matched detection/tracker pairs, unmatched detections, and unmatched trackers.
6) For matched pairs, update the tracker with its detection;
each unmatched detection is treated as a newly appeared target and is given a brand-new tracker, and every newly created tracker increments the ID counter by 1. In the first frame all detections are unmatched.
7) Collect and return the tracker outputs.
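
As a quick illustration of step 1 of the flow and of the input that Sort.update() expects, here is a minimal sketch (using the PETS09-S2L1 sequence path as an example) that loads det.txt, extracts the detections of frame 1 and converts them from (x, y, w, h, score) to (x1, y1, x2, y2, score); it simply mirrors what the main loop at the bottom of the script does.

import numpy as np

seq_dets = np.loadtxt('data/PETS09-S2L1/det/det.txt', delimiter=',')  # all detections of the sequence
frame = 1
dets = seq_dets[seq_dets[:, 0] == frame, 2:7]  # columns 3..7 of this frame: x, y, w, h, score
dets[:, 2:4] += dets[:, 0:2]                   # (x, y, w, h) -> (x1, y1, x2, y2): bottom-right = top-left + size
# dets is now in the [[x1,y1,x2,y2,score], ...] format that Sort.update(dets) expects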

from __future__ import print_function
from numba import jit
import os.path
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from skimage import io
from sklearn.utils.linear_assignment_ import linear_assignment
import glob
import time
import argparse
from filterpy.kalman import KalmanFilter

@jit
def iou(bb_test,bb_gt):
  """
  Computes IoU between two bboxes in the form [x1,y1,x2,y2]
  """
  # x of the intersection's top-left corner: the larger of the two x1 values
  xx1 = np.maximum(bb_test[0], bb_gt[0])
  # y of the intersection's top-left corner: the larger of the two y1 values
  yy1 = np.maximum(bb_test[1], bb_gt[1])
  # x of the intersection's bottom-right corner: the smaller of the two x2 values
  xx2 = np.minimum(bb_test[2], bb_gt[2])
  # y of the intersection's bottom-right corner: the smaller of the two y2 values
  yy2 = np.minimum(bb_test[3], bb_gt[3])
  # width and height of the intersection, clipped at 0 when the boxes do not overlap
  w = np.maximum(0., xx2 - xx1)
  h = np.maximum(0., yy2 - yy1)
  # area of the intersection
  wh = w * h
  # IoU = a / (s1 + s2 - a), where a is the intersection area and s1, s2 are the areas of the two boxes
  o = wh / ((bb_test[2]-bb_test[0])*(bb_test[3]-bb_test[1])
    + (bb_gt[2]-bb_gt[0])*(bb_gt[3]-bb_gt[1]) - wh)
  return(o)
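
# Quick sanity check of iou() (illustrative values, not part of the original script):
#   iou([0., 0., 10., 10.], [5., 5., 15., 15.])
#   intersection = 5 * 5 = 25, union = 100 + 100 - 25 = 175, IoU = 25 / 175 ≈ 0.143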

def convert_bbox_to_z(bbox):
  """
  Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form
    [x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is
    the aspect ratio
  """
  w = bbox[2]-bbox[0]
  h = bbox[3]-bbox[1]
  x = bbox[0]+w/2.
  y = bbox[1]+h/2.
  s = w*h    #scale: the area of the box
  r = w/float(h)
  #reshape into a 4x1 column vector
  return np.array([x,y,s,r]).reshape((4,1))

def convert_x_to_bbox(x,score=None):
  """
  Takes a bounding box in the centre form [x,y,s,r] and returns it in the form
    [x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right
  """
  w = np.sqrt(x[2]*x[3])
  h = x[2]/w
  if(score==None):
    return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.]).reshape((1,4))
  else:
    return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.,score]).reshape((1,5))
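
# Note: convert_x_to_bbox inverts convert_bbox_to_z. Since s = w*h and r = w/h,
# we have s*r = w*w, hence w = sqrt(s*r) and h = s/w, as used above.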


class KalmanBoxTracker(object):
  """
  This class represents the internal state of individual tracked objects observed as bbox.
  """
  count = 0
  def __init__(self,bbox):
    """
    Initialises a tracker using initial bounding box.
    """
    #define constant velocity model
    self.kf = KalmanFilter(dim_x=7, dim_z=4)
    self.kf.F = np.array([[1,0,0,0,1,0,0],[0,1,0,0,0,1,0],[0,0,1,0,0,0,1],[0,0,0,1,0,0,0],  [0,0,0,0,1,0,0],[0,0,0,0,0,1,0],[0,0,0,0,0,0,1]])
    self.kf.H = np.array([[1,0,0,0,0,0,0],[0,1,0,0,0,0,0],[0,0,1,0,0,0,0],[0,0,0,1,0,0,0]])
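    # The 7-dimensional state is x = [u, v, s, r, du, dv, ds]: (u, v) is the box centre,
    # s the scale (area) and r the aspect ratio. r is assumed constant, so it has no
    # velocity term. F above encodes this constant-velocity motion model, and H states
    # that only the first four components (u, v, s, r) are measured.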

    self.kf.R[2:,2:] *= 10.
    self.kf.P[4:,4:] *= 1000. #give high uncertainty to the unobservable initial velocities
    self.kf.P *= 10.
    self.kf.Q[-1,-1] *= 0.01
    self.kf.Q[4:,4:] *= 0.01

    self.kf.x[:4] = convert_bbox_to_z(bbox)
    self.time_since_update = 0   # frames elapsed since the last update() call
    self.id = KalmanBoxTracker.count
    KalmanBoxTracker.count += 1
    self.history = []            # predicted boxes accumulated since the last update
    self.hits = 0                # total number of updates received
    self.hit_streak = 0          # number of consecutive frames with a matched detection
    self.age = 0                 # number of predict() calls, i.e. frames this tracker has existed

  def update(self,bbox):
    """
    Updates the state vector with observed bbox.
    """
    self.time_since_update = 0
    self.history = []
    self.hits += 1
    self.hit_streak += 1
    self.kf.update(convert_bbox_to_z(bbox))

  def predict(self):
    """
    Advances the state vector and returns the predicted bounding box estimate.
    """
    if((self.kf.x[6]+self.kf.x[2])<=0):
      self.kf.x[6] *= 0.0
    self.kf.predict()
    self.age += 1
    if(self.time_since_update>0):
      self.hit_streak = 0
    self.time_since_update += 1
    self.history.append(convert_x_to_bbox(self.kf.x))
    return self.history[-1]

  def get_state(self):
    """
    Returns the current bounding box estimate.
    """
    return convert_x_to_bbox(self.kf.x)
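
# Typical per-frame use of KalmanBoxTracker: predict() is called once per frame for every
# live tracker, update(bbox) only when a detection has been matched to it, and
# get_state() returns the current estimate in [x1,y1,x2,y2] form.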

def associate_detections_to_trackers(detections,trackers,iou_threshold = 0.3):
  """
  Assigns detections to tracked objects (both represented as bounding boxes).
  Returns 3 lists: matches, unmatched_detections and unmatched_trackers.
  """
  #if there are no trackers yet, every detection is unmatched
  if(len(trackers)==0):
    return np.empty((0,2),dtype=int), np.arange(len(detections)), np.empty((0,5),dtype=int)
  #IoU matrix between every detection box and every tracker box
  iou_matrix = np.zeros((len(detections),len(trackers)),dtype=np.float32)

  #for each detection, compute its IoU with every tracker
  for d,det in enumerate(detections):
    for t,trk in enumerate(trackers):
      iou_matrix[d,t] = iou(det,trk)
  #Hungarian algorithm: maximise the total IoU by minimising the negated IoU matrix
  matched_indices = linear_assignment(-iou_matrix)
  print("matched_indices: ", matched_indices)
  '''
  matched_indices:
  [[0 1]
  [1 0]
  [2 2]]
  '''
  unmatched_detections = []
  #detections that do not appear in the assignment go into unmatched_detections
  for d,det in enumerate(detections):
    if(d not in matched_indices[:,0]):#matched_indices[:,0] holds detection indices, matched_indices[:,1] tracker indices
      unmatched_detections.append(d)#store the index of the unmatched detection
  unmatched_trackers = []
  for t,trk in enumerate(trackers):
    if(t not in matched_indices[:,1]):
      unmatched_trackers.append(t)

  #filter out assignments with low IoU
  matches = []
  for m in matched_indices:
    #if the IoU is below the threshold, treat both the detection and the tracker as unmatched
    if(iou_matrix[m[0],m[1]]<iou_threshold):
      unmatched_detections.append(m[0])
      unmatched_trackers.append(m[1])
    else:
      matches.append(m.reshape(1,2))#reshape into a 1x2 row: [detection index, tracker index]
  if(len(matches)==0):
    matches = np.empty((0,2),dtype=int)
  else:
    matches = np.concatenate(matches,axis=0)

  return matches, np.array(unmatched_detections), np.array(unmatched_trackers)
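
# Reading the matched_indices example printed above ([[0 1], [1 0], [2 2]]): detection 0 is
# paired with tracker 1, detection 1 with tracker 0, and detection 2 with tracker 2; any of
# these pairs whose IoU falls below iou_threshold is then moved back into
# unmatched_detections / unmatched_trackers before matches is returned.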



class Sort(object):
  def __init__(self,max_age=1,min_hits=3):
    """
    Sets key parameters for SORT
    """
    self.max_age = max_age   # a tracker is removed after this many frames without a matched detection
    self.min_hits = min_hits # minimum number of consecutive hits before a tracker is reported
    self.trackers = []
    self.frame_count = 0

  def update(self,dets):
    """
    Params:
    dets是一組數組(x1,y1,x2,y2,score)
      dets - a numpy array of detections in the format [[x1,y1,x2,y2,score],[x1,y1,x2,y2,score],...]
    Requires: this method must be called once for each frame even with empty detections.
    Returns the a similar array, where the last column is the object ID.
    返回類似的數組,其中最後一列是對象ID。
    NOTE: The number of objects returned may differ from the number of detections provided.
    """
    self.frame_count += 1
    #get the predicted locations from the existing trackers
    #self.trackers holds the trackers carried over from the previous frame
    trks = np.zeros((len(self.trackers),5))#len(self.trackers) is 0 on the very first frame
    print("trks: ", trks)
    to_del = []
    ret = []
    for t,trk in enumerate(trks):
      pos = self.trackers[t].predict()[0]#predict each previous-frame tracker into the current frame
      print("pos: ", pos)
      trk[:] = [pos[0], pos[1], pos[2], pos[3], 0]
      #np.isnan() flags NaN entries; np.any() returns True if at least one element is True
      if(np.any(np.isnan(pos))):
        to_del.append(t)#remember trackers whose predicted box contains NaN
    #np.ma.masked_invalid masks invalid entries (NaN/inf); np.ma.compress_rows drops every row of the 2-D masked array that contains a masked value
    trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
    for t in reversed(to_del):#delete the invalid trackers in reverse order so the remaining indices stay valid
      self.trackers.pop(t)
    #assign detections to trackers
    matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(dets,trks)
    #update matched trackers with their assigned detections
    for t, trk in enumerate(self.trackers):
      #unmatched_trks holds trackers that the Hungarian step left unmatched, plus those matched with IoU below the threshold
      if(t not in unmatched_trks):#t is a matched tracker
        d = matched[np.where(matched[:,1]==t)[0],0]#index of the detection assigned to tracker t
        trk.update(dets[d,:][0])#update the Kalman filter with the matched detection

    #create and initialise a new tracker for each unmatched detection
    for i in unmatched_dets:
        trk = KalmanBoxTracker(dets[i,:]) 
        self.trackers.append(trk)
    i = len(self.trackers)
    for trk in reversed(self.trackers):
        d = trk.get_state()[0]
        #hit_streak is incremented on every successful update; a tracker is reported only if it was
        #updated in this frame and its streak has reached min_hits (or we are still within the first min_hits frames)
        if((trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits)):
          ret.append(np.concatenate((d,[trk.id+1])).reshape(1,-1)) # +1 as MOT benchmark requires positive
        i -= 1
        #remove dead tracklet
        if(trk.time_since_update > self.max_age):
          self.trackers.pop(i)
    if(len(ret)>0):
      return np.concatenate(ret)
    return np.empty((0,5))
    
def parse_args():
    """Parse input arguments."""
    parser = argparse.ArgumentParser(description='SORT demo')
    parser.add_argument('--display', dest='display', help='Display online tracker output (slow) [False]',action='store_true')
    args = parser.parse_args()
    return args

if __name__ == '__main__':
  #all video sequences in the dataset
  sequences = ['PETS09-S2L1','TUD-Campus','TUD-Stadtmitte','ETH-Bahnhof','ETH-Sunnyday','ETH-Pedcross2','KITTI-13','KITTI-17','ADL-Rundle-6','ADL-Rundle-8','Venice-2']

  args = parse_args()#parse command-line arguments
  display = args.display#whether to visualise the tracking results
  phase = 'train'
  total_time = 0.0#total processing time
  total_frames = 0#total number of frames processed
  colours = np.random.rand(32,3) #32x3 random matrix used as drawing colours
  if(display):
    #if not os.path.exists('mot_benchmark'):
      #print('\n\tERROR: mot_benchmark link not found!\n\n    Create a symbolic link to the MOT benchmark\n    (https://motchallenge.net/data/2D_MOT_2015/#download). E.g.:\n\n    $ ln -s /path/to/MOT2015_challenge/2DMOT2015 mot_benchmark\n\n')
      #exit()
    plt.ion()#turn on matplotlib interactive mode
    fig = plt.figure() #figure used for visualisation
  #create the output folder if it does not exist
  if not os.path.exists('output'):
    os.makedirs('output')
  
  for seq in sequences:
    mot_tracker = Sort() #create a new SORT tracker for this sequence
    print(seq)
    #load the detection file of this sequence
    seq_dets = np.loadtxt('data/%s/det/det.txt'%(seq),delimiter=',') #load detections
    with open('output/%s.txt'%(seq),'w') as out_file:
      print("Processing %s."%(seq))
      #seq_dets[:,0].max() is the largest frame index in the sequence
      for frame in range(int(seq_dets[:,0].max())):
        frame += 1 #detection and frame numbers begin at 1
        dets = seq_dets[seq_dets[:,0]==frame,2:7]#this frame's detections: columns 3-7, i.e. x, y, w, h, score
        dets[:,2:4] += dets[:,0:2] #convert (x,y,w,h) to (x1,y1,x2,y2): bottom-right corner = top-left corner + (w,h)
        total_frames += 1

        if(display):
          ax1 = fig.add_subplot(111, aspect='equal')#single 1x1 subplot
          fn = 'data/%s/img1/%06d.jpg' % (seq, frame)#path of the current frame image
          #fn = 'mot_benchmark/%s/%s/img1/%06d.jpg'%(phase,seq,frame)
          im = io.imread(fn)#read the frame image
          ax1.imshow(im)#display the image
          plt.title(seq+' Tracked Targets')

        start_time = time.time()
        #print(dets)
        trackers = mot_tracker.update(dets)#update the tracker with this frame's detections; each returned row is [x1,y1,x2,y2,id]
        #print(trackers)
        cycle_time = time.time() - start_time
        total_time += cycle_time

        for d in trackers:
          print('%d,%d,%.2f,%.2f,%.2f,%.2f,1,-1,-1,-1'%(frame,d[4],d[0],d[1],d[2]-d[0],d[3]-d[1]),file=out_file)
          if(display):
            d = d.astype(np.int32)#convert to integers for drawing
            ax1.add_patch(patches.Rectangle((d[0],d[1]),d[2]-d[0],d[3]-d[1],fill=False,lw=3,ec=colours[d[4]%32,:]))
            #ax1.set_adjustable('box-forced')

        if(display):
          fig.canvas.flush_events()
          plt.draw()
          ax1.cla()

  print("Total Tracking took: %.3f for %d frames or %.1f FPS"%(total_time,total_frames,total_frames/total_time))
  if(display):
    print("Note: to get real runtime results run without the option: --display")

Parts not yet fully understood (notes added below):

1.matched_indices = linear_assignment(-iou_matrix)
2.trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
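
Notes on these two lines:

1. linear_assignment solves the minimum-cost bipartite assignment problem (the Hungarian algorithm): given a cost matrix it picks at most one tracker per detection so that the total cost is minimal. Since we want the pairing with the largest total IoU, the IoU matrix is negated before being passed in. sklearn.utils.linear_assignment_ has been removed from recent scikit-learn releases; scipy.optimize.linear_sum_assignment computes the same assignment and can serve as a replacement (it returns two index arrays instead of one Nx2 array, so they need to be stacked). A minimal sketch with made-up values:

import numpy as np
from scipy.optimize import linear_sum_assignment

iou_matrix = np.array([[0.7, 0.1],
                       [0.2, 0.6],
                       [0.0, 0.0]])                    # 3 detections x 2 trackers (made-up values)
row_ind, col_ind = linear_sum_assignment(-iou_matrix)  # maximise total IoU by minimising its negation
matched_indices = np.stack([row_ind, col_ind], axis=1)
print(matched_indices)                                 # [[0 0]
                                                       #  [1 1]] -- detection 2 stays unmatched

2. np.ma.masked_invalid(trks) masks every NaN/inf entry of trks, and np.ma.compress_rows then removes every row of the 2-D masked array that contains at least one masked entry. Together they drop trackers whose predicted box is numerically invalid:

import numpy as np

trks = np.array([[10., 20., 50., 80., 0.],
                 [np.nan, 5., 30., 40., 0.]])          # second prediction is invalid (made-up values)
trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
print(trks)                                            # only the first row survives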

Drawbacks:
1. The algorithm directly takes pre-computed, labelled detection results as part of its pipeline, so tracking quality is bounded by the quality of the detector.
2. To be added.
