diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..1264fd0f
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,139 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+.vscode
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+#  Usually these files are written by a python script from a template
+#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+#   For a library or package, you might want to ignore these files since the code is
+#   intended to run in multiple environments; otherwise, check them in:
+#   .python-version
+
+# pipenv
+#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+#   However, in case of collaboration, if having platform-specific dependencies or dependencies
+#   having no cross-platform support, pipenv may install dependencies that don't work, or not
+#   install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
\ No newline at end of file
diff --git a/README.md b/README.md
index 61eaec49..7d0db9af 100644
--- a/README.md
+++ b/README.md
@@ -55,6 +55,9 @@ python save_model.py --model yolov4
 # Run yolov4 deep sort object tracker on video
 python object_tracker.py --video ./data/video/test.mp4 --output ./outputs/demo.avi --model yolov4
 
+# football
+python object_tracker.py --video data/video/football1.mp4 --output outputs/football1.mp4
+
 # Run yolov4 deep sort object tracker on webcam (set video flag to 0)
 python object_tracker.py --video 0 --output ./outputs/webcam.avi --model yolov4
 ```
diff --git a/core/__pycache__/backbone.cpython-37.pyc b/core/__pycache__/backbone.cpython-37.pyc
index f4126f13..07bf0ff0 100644
Binary files a/core/__pycache__/backbone.cpython-37.pyc and b/core/__pycache__/backbone.cpython-37.pyc differ
diff --git a/core/__pycache__/common.cpython-37.pyc b/core/__pycache__/common.cpython-37.pyc
index ed6d4e8e..050b866b 100644
Binary files a/core/__pycache__/common.cpython-37.pyc and b/core/__pycache__/common.cpython-37.pyc differ
diff --git a/core/__pycache__/config.cpython-37.pyc b/core/__pycache__/config.cpython-37.pyc
index 4087e9c4..42730e85 100644
Binary files a/core/__pycache__/config.cpython-37.pyc and b/core/__pycache__/config.cpython-37.pyc differ
diff --git a/core/__pycache__/utils.cpython-37.pyc b/core/__pycache__/utils.cpython-37.pyc
index 047fde09..ebe1d0b8 100644
Binary files a/core/__pycache__/utils.cpython-37.pyc and b/core/__pycache__/utils.cpython-37.pyc differ
diff --git a/core/__pycache__/yolov4.cpython-37.pyc b/core/__pycache__/yolov4.cpython-37.pyc
index e83d18e5..463365cb 100644
Binary files a/core/__pycache__/yolov4.cpython-37.pyc and b/core/__pycache__/yolov4.cpython-37.pyc differ
diff --git a/data/video/Soccer_field.png b/data/video/Soccer_field.png
new file mode 100644
index 00000000..05c5bc56
Binary files /dev/null and b/data/video/Soccer_field.png differ
diff --git a/data/video/football1.mp4 b/data/video/football1.mp4
new file mode 100644
index 00000000..8fb27090
Binary files /dev/null and b/data/video/football1.mp4 differ
diff --git a/data/video/id3_500f.mp4 b/data/video/id3_500f.mp4
new file mode 100644
index 00000000..b6abd028
Binary files /dev/null and b/data/video/id3_500f.mp4 differ
diff --git a/deep_sort/__pycache__/__init__.cpython-37.pyc b/deep_sort/__pycache__/__init__.cpython-37.pyc
index 5be45487..91fa30e2 100644
Binary files a/deep_sort/__pycache__/__init__.cpython-37.pyc and b/deep_sort/__pycache__/__init__.cpython-37.pyc differ
diff --git a/deep_sort/__pycache__/detection.cpython-37.pyc b/deep_sort/__pycache__/detection.cpython-37.pyc
index 2f61e811..83d29e6c 100644
Binary files a/deep_sort/__pycache__/detection.cpython-37.pyc and b/deep_sort/__pycache__/detection.cpython-37.pyc differ
diff --git a/deep_sort/__pycache__/iou_matching.cpython-37.pyc b/deep_sort/__pycache__/iou_matching.cpython-37.pyc
index bbc4f76c..d25fa530 100644
Binary files a/deep_sort/__pycache__/iou_matching.cpython-37.pyc and b/deep_sort/__pycache__/iou_matching.cpython-37.pyc differ
diff --git a/deep_sort/__pycache__/kalman_filter.cpython-37.pyc b/deep_sort/__pycache__/kalman_filter.cpython-37.pyc
index 723b1afc..ddc6a1ae 100644
Binary files a/deep_sort/__pycache__/kalman_filter.cpython-37.pyc and b/deep_sort/__pycache__/kalman_filter.cpython-37.pyc differ
diff --git a/deep_sort/__pycache__/linear_assignment.cpython-37.pyc b/deep_sort/__pycache__/linear_assignment.cpython-37.pyc
index 62d535e3..c0f37954 100644
Binary files a/deep_sort/__pycache__/linear_assignment.cpython-37.pyc and b/deep_sort/__pycache__/linear_assignment.cpython-37.pyc differ
diff --git a/deep_sort/__pycache__/nn_matching.cpython-37.pyc b/deep_sort/__pycache__/nn_matching.cpython-37.pyc
index ff5f8643..3a34b0b8 100644
Binary files a/deep_sort/__pycache__/nn_matching.cpython-37.pyc and b/deep_sort/__pycache__/nn_matching.cpython-37.pyc differ
diff --git a/deep_sort/__pycache__/preprocessing.cpython-37.pyc b/deep_sort/__pycache__/preprocessing.cpython-37.pyc
index f55434ec..f5ab14d7 100644
Binary files a/deep_sort/__pycache__/preprocessing.cpython-37.pyc and b/deep_sort/__pycache__/preprocessing.cpython-37.pyc differ
diff --git a/deep_sort/__pycache__/track.cpython-37.pyc b/deep_sort/__pycache__/track.cpython-37.pyc
index df602205..bb744c8f 100644
Binary files a/deep_sort/__pycache__/track.cpython-37.pyc and b/deep_sort/__pycache__/track.cpython-37.pyc differ
diff --git a/deep_sort/__pycache__/tracker.cpython-37.pyc b/deep_sort/__pycache__/tracker.cpython-37.pyc
index 1f61f1df..509509ae 100644
Binary files a/deep_sort/__pycache__/tracker.cpython-37.pyc and b/deep_sort/__pycache__/tracker.cpython-37.pyc differ
diff --git a/extract_pixel_pos.py b/extract_pixel_pos.py
new file mode 100644
index 00000000..5d84ac5e
--- /dev/null
+++ b/extract_pixel_pos.py
@@ -0,0 +1,52 @@
+import cv2 as cv
+import sys
+
+ref_points=[]
+
+def get_point(event,x,y,flags,param):
+    if event == cv.EVENT_LBUTTONDOWN:
+        print("x={},y={}".format(x,y))
+        xy="%d,%d"%(x,y)
+        ref_points.append((x,y))
+        cv.circle(frame, (x, y), 1, (255, 0, 0), thickness = 1)
+        cv.putText(frame,xy,(x,y),cv.FONT_HERSHEY_PLAIN,2.0,(0,0,0),thickness=2)
+        cv.imshow('Soccer',frame)
+
+
+file_name=sys.argv[1].split('.')
+if file_name[1]=='mp4':
+    video_capture=cv.VideoCapture(sys.argv[1])
+    fps = int(video_capture.get(cv.CAP_PROP_FPS))
+    width = int(video_capture.get(cv.CAP_PROP_FRAME_WIDTH))
+    height = int(video_capture.get(cv.CAP_PROP_FRAME_HEIGHT))
+    print("fps:", fps)
+    print("width:", width)
+    print("height:", height)
+    ret_val,frame=video_capture.read()
+    if ret_val:
+        cv.namedWindow('Soccer',cv.WINDOW_NORMAL)
+        cv.setMouseCallback('Soccer',get_point)
+    else:
+        print("error: failed to read frame")
+elif file_name[1]=='png' or file_name[1]=='jpg':
+    frame=cv.imread(sys.argv[1])
+    cv.namedWindow('Soccer',cv.WINDOW_NORMAL)
+    cv.setMouseCallback('Soccer',get_point)
+
+def extract_points():
+    while 1:
+        cv.imshow('Soccer',frame)
+        k=cv.waitKey(1)&0xFF  # the waitKey call must stay inside the loop
+        if not cv.getWindowProperty('Soccer',cv.WND_PROP_VISIBLE):
+            break
+        if k==27:
+            break
+    cv.destroyAllWindows()
+    return ref_points
+
+#cmd example
+#python extract_pixel_pos.py data/video/id3_500f.mp4
+#
+if __name__=="__main__":
+    extract_points()
+    print(ref_points)
diff --git a/object_tracker.py b/object_tracker.py
index 3e37d4b2..d355a011 100644
--- a/object_tracker.py
+++ b/object_tracker.py
@@ -23,6 +23,7 @@
 from deep_sort.detection import Detection
 from deep_sort.tracker import Tracker
 from tools import generate_detections as gdet
+import collections
 
 flags.DEFINE_string('framework', 'tf', '(tf, tflite, trt')
 flags.DEFINE_string('weights', './checkpoints/yolov4-416', 'path to weights file')
@@ -38,6 +39,54 @@
 flags.DEFINE_boolean('info', False, 'show detailed info of tracked objects')
 flags.DEFINE_boolean('count', False, 'count objects being tracked on screen')
 
+
+def getColorList():
+    dict = collections.defaultdict(list)
+
+    # white
+    lower_white = np.array([0, 0, 221])
+    upper_white = np.array([180, 30, 255])
+    color_list = []
+    color_list.append(lower_white)
+    color_list.append(upper_white)
+    color_list.append([248, 248, 255])
+    dict['white'] = color_list
+
+
+    # blue
+    lower_blue = np.array([100, 43, 46])
+    upper_blue = np.array([124, 255, 255])
+    color_list = []
+    color_list.append(lower_blue)
+    color_list.append(upper_blue)
+    color_list.append([0, 0, 221])
+    dict['blue'] = color_list
+
+    return dict
+
+def get_color(frame):
+    hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
+    maxsum = -100
+    color = None
+    color_dict = getColorList()
+    for d in color_dict:
+        mask = cv2.inRange(hsv,color_dict[d][0],color_dict[d][1])
+        #binary = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)[1]
+        #binary = cv2.dilate(binary,None,iterations=2)
+        #cnts, hiera = cv2.findContours(binary.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
+        s = sum(mask.flatten())
+        #for c in cnts:
+        #    sum+=cv2.contourArea(c)
+        if s > maxsum :
+            maxsum = s
+            color = d
+    if(color=='blue'):
+        for d in color_dict:
+            mask = cv2.inRange(hsv,color_dict[d][0],color_dict[d][1])
+            print(sum(mask.flatten()))
+
+    return color
+
 def main(_argv):
     # Definition of the parameters
     max_cosine_distance = 0.4
@@ -90,10 +139,14 @@
         codec = cv2.VideoWriter_fourcc(*FLAGS.output_format)
         out = cv2.VideoWriter(FLAGS.output, codec, fps, (width, height))
 
+    color_dict = getColorList()
     frame_num = 0
+
    # while video is running
    while True:
        return_value, frame = vid.read()
        if return_value:
+            pic = frame.copy()
+            cv2.imwrite("pic.png", pic)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image = Image.fromarray(frame)
@@ -157,8 +210,9 @@
         class_names = utils.read_class_names(cfg.YOLO.CLASSES)
 
         # by default allow all classes in .names file
-        allowed_classes = list(class_names.values())
-
+        #allowed_classes = list(class_names.values())
+        allowed_classes = ['person', 'sports ball']
+        # allowed_classes = ['sports ball']
         # custom allowed classes (uncomment line below to customize tracker for only people)
         #allowed_classes = ['person']
 
@@ -205,14 +259,25 @@
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            bbox = track.to_tlbr()
-            class_name = track.get_class()
-
+            #class_name = track.get_class()
+            class_name = str(track.track_id)
+
            # draw bbox on screen
-            color = colors[int(track.track_id) % len(colors)]
-            color = [i * 255 for i in color]
-            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color, 2)
-            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1]-30)), (int(bbox[0])+(len(class_name)+len(str(track.track_id)))*17, int(bbox[1])), color, -1)
-            cv2.putText(frame, class_name + "-" + str(track.track_id),(int(bbox[0]), int(bbox[1]-10)),0, 0.75, (255,255,255),2)
+            #color = colors[int(track.track_id) % len(colors)]
+            #color = [i * 255 for i in color]
+            cv2.imwrite("p"+str(track.track_id)+".png", pic[int(max(0,min(bbox[1],bbox[3]))):int(max(bbox[1],bbox[3])), int(max(min(bbox[0],bbox[2]),0)):int(max(bbox[2],bbox[0]))])
+            color = get_color(pic[int(max(0,min(bbox[1],bbox[3]))):int(max(bbox[1],bbox[3])), int(max(min(bbox[0],bbox[2]),0)):int(max(bbox[2],bbox[0]))])
+            temp = []
+            for i in color_dict[color][2]:
+                temp.append(int(i))
+            if color=='blue':
+                print(track.track_id)
+            if(len(frame)*0.5<= abs(int(bbox[1]-bbox[3]))):
+                continue
+            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), temp, 2)
+            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1]-5)), (int(bbox[0])+(len(class_name)+len(str(track.track_id)))*7, int(bbox[1])), temp, -1)
+            #cv2.putText(frame, class_name + "-" + str(track.track_id),(int(bbox[0]), int(bbox[1]-10)),0, 0.3, (255,255,255),2)
+            cv2.putText(frame, class_name,(int(bbox[0]), int(bbox[1]-10)),0, 0.3, (255,255,255),2)
 
            # if enable info flag then print details about each track
            if FLAGS.info:
diff --git a/outputs/demo.avi b/outputs/demo.avi
index 735ab0f3..9675ba18 100644
Binary files a/outputs/demo.avi and b/outputs/demo.avi differ
diff --git a/perspective_transfrom.py b/perspective_transfrom.py
new file mode 100644
index 00000000..b28c858b
--- /dev/null
+++ b/perspective_transfrom.py
@@ -0,0 +1,52 @@
+import cv2
+import sys
+import numpy as np
+
+video_center_point=np.array([940,397],dtype='float32')
+video_points=np.array([[1431, 397], [450, 397], [940, 97], [940, 1022]],dtype='float32')
+row,col=video_points.shape
+for i in range(row):
+    video_points[i]-=video_center_point
+
+soccer_field_center=np.array([512,327],dtype='float32')
+soccer_field_points=np.array([[594,327],[429,327],[512,26],[512,626]],dtype='float32')
+row,col=soccer_field_points.shape
+for i in range(row):
+    soccer_field_points[i]-=soccer_field_center
+
+soccer_filed_img=cv2.imread('data/video/Soccer_field.png')
+M=cv2.getPerspectiveTransform(video_points,soccer_field_points)
+#print(M)
+
+# input: an (x, y) pixel tuple in the video; output: an (x, y) tuple on the field image
+def perspective_transform(people_point):
+    (x,y)=people_point
+    people_point=np.array([x,y],dtype='float32')
+    people_point-=video_center_point
+    people_point=np.r_[people_point,1]  # append 1 to form homogeneous coordinates
+    #print(people_point.shape)
+
+    dst_people_point=np.dot(M,people_point)
+    dst_people_point=dst_people_point[0:2]/dst_people_point[2]  # divide by w to complete the perspective transform
+    dst_people_point+=soccer_field_center
+    (x,y)=(int(dst_people_point[0]),int(dst_people_point[1]))
+    return (x,y)
+    #print((x,y))
+
+if __name__=="__main__":
+    #dst_people_point=cv2.perspectiveTransform(people_point.T,M)
+    cv2.namedWindow('Perspective',cv2.WINDOW_NORMAL)
+    soccer_filed_img=cv2.imread('data/video/Soccer_field.png')
+    print(soccer_filed_img.shape)
+    (x,y)=perspective_transform((1276,284))  # a simple single-player example; remember to pass the foot point
+    cv2.circle(soccer_filed_img,(x,y),5,(255,0,0),thickness=3)
+    while 1:
+        cv2.imshow('Perspective',soccer_filed_img)
+        # k=cv2.waitKey(1)&0xFF  # the waitKey call must stay inside the loop
+        # if not cv2.getWindowProperty('Perspective',cv2.WND_PROP_VISIBLE):
+        #     break
+        if cv2.waitKey(0)&0xFF==27:
+            break
+    cv2.destroyAllWindows()
+