diff --git a/Ideas.ipynb b/Ideas.ipynb new file mode 100644 index 000000000..ac1f9ba67 --- /dev/null +++ b/Ideas.ipynb @@ -0,0 +1,459 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Extract frames from videos" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "the number of frames: 1824\n", + "Saved 183 images in total\n", + "Frames per second using video.get(cv2.CAP_PROP_FPS) : 29.97002997002997\n" + ] + } + ], + "source": [ + "# coding=utf-8\n", + "# Extract frames from a video with OpenCV and save them as images (cv2.VideoCapture)\n", + "import os\n", + "import cv2\n", + "import time\n", + "\n", + "# Global variables\n", + "VIDEO_PATH = 'assets/20230101_000613_vflip.MP4' # path to the source video\n", + "EXTRACT_FOLDER = 'assets' # where extracted frames are stored\n", + "EXTRACT_FREQUENCY = 10 # save every Nth frame\n", + "\n", + "\n", + "# Main routine\n", + "def extract_frames(video_path, dst_folder, index):\n", + " # Open the video\n", + " video = cv2.VideoCapture(video_path)\n", + " frame_count = 0\n", + "\n", + " # Iterate over every frame in the video\n", + " while True:\n", + " # Read one frame at a time\n", + " _, frame = video.read()\n", + " if frame is None:\n", + " break\n", + " # Save an image at the configured frequency\n", + " if frame_count % EXTRACT_FREQUENCY == 0:\n", + " # Build the output file name\n", + " save_path = \"{}/{:>03d}.jpg\".format(dst_folder, index)\n", + " # Write the frame to disk\n", + " cv2.imwrite(save_path, frame)\n", + " index += 1 # one more image saved\n", + " frame_count += 1 # one more frame read\n", + "\n", + " # Total number of frames in the video\n", + " print(f'the number of frames: {frame_count}')\n", + " # Print the total number of extracted images\n", + " print(f\"Saved {index - 1} images in total\")\n", + "\n", + " # Compute FPS, method 1: get()\n", + " (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.') # Find OpenCV version\n", + " # (major_ver, minor_ver, subminor_ver) = (4, 5, 4)\n", + " if int(major_ver) < 3:\n", + " fps = video.get(cv2.cv.CV_CAP_PROP_FPS) # FPS API for OpenCV < 3\n", + " print(\"Frames per second using video.get(cv2.cv.CV_CAP_PROP_FPS): {0}\".format(fps))\n", + " else:\n", + " fps = video.get(cv2.CAP_PROP_FPS) # FPS API for OpenCV >= 3\n", + " print(\"Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}\".format(fps))\n", + "\n", + " # Finally release the capture\n", + " video.release()\n", + "\n", + "\n", + "def main():\n", + " # Each run writes into its own timestamped subfolder; do not delete\n", + " # EXTRACT_FOLDER itself, since it also holds the source video.\n", + " current_time = time.localtime()\n", + " save_folder = os.path.join(\n", + " EXTRACT_FOLDER, time.strftime(\"%Y_%m_%d_%H_%M_%S\", current_time)\n", + " )\n", + " os.makedirs(save_folder, exist_ok=True)\n", + " # Extract the frames and save them to the target path\n", + " extract_frames(VIDEO_PATH, save_folder, 1)\n", + "\n", + "\n", + "if __name__ == '__main__':\n", + " main()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Crop Slices" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import cv2\n", + "\n", + "img = cv2.imread(\"assets/test_frames/001.jpg\")\n", + "\n", + "# # Keep the preview window open; a keyboard event decides what happens next\n", + "# while True:\n", + "\n", + "# cv2.imshow('img', img)\n", + "\n", + "# key = cv2.waitKey(0)\n", + "\n", + "# if(key & 0xFF == ord('q')):\n", + "# break\n", + "# elif(key & 0xFF == ord('s')):\n", + "# cv2.imwrite(\"outputs_imgs/outputs_slices/save.jpg\", img)\n", + "# else:\n", + "# print(key)\n", + "\n", + "# cv2.destroyAllWindows()\n", + "\n", + "height = img.shape[0]\n", + "width = img.shape[1]\n", + "print(\"Image height is \", height, \" and width is \", width)\n", + "stride = 64\n", + "slice_size = (320, 320)\n",
+ "print(\"Crop width is \", slice_size[0], \" and height is \", slice_size[1])\n", + "\n", + "count = 1\n", + "for y in range(0, height, stride):\n", + " for x in range(0, width, stride):\n", + " box = (x, y, x + slice_size[0], y + slice_size[1])\n", + " slice_img = img[y:y + slice_size[1], x:x + slice_size[0]]\n", + "\n", + " crop_height_actual, crop_width_actual = slice_img.shape[:2]\n", + " if crop_height_actual < slice_size[1] or crop_width_actual < slice_size[0]:\n", + " continue\n", + "\n", + " # Save the cropped image\n", + " cv2.imwrite('outputs_imgs/outputs_slices/cropped_image_{}.jpg'.format(count), slice_img)\n", + " print(\"Cropped image {}.jpg saved.\".format(count))\n", + " count += 1\n", + " # cropped_image = img[64:320+64, 64:320+64]\n", + " # cv2.imwrite('outputs_imgs/outputs_slices/cropped_image_1.jpg', cropped_image)\n", + " # print(\"Slice img width is \", slice_img.shape[0], \" and height is \", slice_img.shape[1])\n", + " # if slice_img.shape[0] == 320 and slice_img.shape[1] == 320:\n", + " # cv2.imwrite(\"outputs_imgs/outputs_slices\", slice_img)\n", + "# cv2.namedWindow(\"Cropped images\", cv2.WINDOW_NORMAL)\n", + "# cv2.resizeWindow(\"Cropped images\", 800, 600)\n", + "# cv2.imshow(\"Cropped image \", slice_img)\n", + "# cv2.waitKey(10)\n", + "# cv2.destroyAllWindows()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import cv2\n", + "\n", + "# Load the image\n", + "image = cv2.imread('assets/test_frames/001.jpg')\n", + "\n", + "# Crop size\n", + "crop_width, crop_height = 320, 320\n", + "\n", + "# Crop stride\n", + "step_x, step_y = 64, 64\n", + "\n", + "# Image size\n", + "image_height, image_width = image.shape[:2]\n", + "\n", + "# Crop in a loop and save each slice\n", + "count = 1\n", + "for y in range(0, image_height, step_y):\n", + " for x in range(0, image_width, step_x):\n", + " # Crop the image\n", + " crop = image[y:y+crop_height, x:x+crop_width]\n", + "\n", + " # Skip crops that would extend past the image border\n", + " crop_height_actual, crop_width_actual = crop.shape[:2]\n", + " if crop_height_actual < crop_height or crop_width_actual < crop_width:\n", + " continue\n", + "\n", + " # Save the cropped image\n", + " cv2.imwrite('outputs_imgs/outputs_slices/cropped_image_{}.jpg'.format(count), crop)\n", + " print(\"Cropped image {}.jpg saved.\".format(count))\n", + "\n", + " count += 1\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Test the same image loaded from different paths" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import cv2\n", + "\n", + "frame = cv2.imread('assets/001.jpg')\n", + "\n", + "height, width = frame.shape[:2]\n", + "\n", + "frame_1 = frame[height-320:height, width-320:width]\n", + "cv2.namedWindow(\"test\", cv2.WINDOW_NORMAL)\n", + "cv2.resizeWindow('test', 800, 600) # resize the window; the name must match namedWindow\n", + "\n", + "cv2.imshow('test', frame_1)\n", + "cv2.waitKey(0)\n", + "cv2.destroyAllWindows()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Test NMS" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[12321. 29241. 11211. 9191. 8736. 10656.]\n"
+ ] + }, + { + "data": { + "image/png": "[base64 PNG data omitted: two-panel matplotlib figure, bounding boxes 'before nms' and 'after nms']", + "text/plain": [
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "\n", + "boxes=np.array([[100,100,210,210,0.72],\n", + " [250,250,420,420,0.8],\n", + " [220,220,320,330,0.92],\n", + " [100,100,190,200,0.71],\n", + " [230,240,325,330,0.81],\n", + " [220,230,315,340,0.9]]) \n", + " \n", + " \n", + "def py_cpu_nms(dets, thresh):\n", + " \"Pure Python NMS baseline\"\n", + " # x1、y1、x2、y2以及score赋值\n", + " x1 = dets[:,0]\n", + " y1 = dets[:,1]\n", + " x2 = dets[:,2]\n", + " y2 = dets[:,3]\n", + " scores = dets[:, 4]\n", + "\n", + " # 每一个检测框的面积\n", + " areas = (y2-y1+1) * (x2-x1+1)\n", + " print(areas)\n", + " # 按照score置信度降序排序\n", + " order = scores.argsort()[::-1]\n", + "\n", + " keep = [] # 保留的结果框集合\n", + " while order.size >0:\n", + " i = order[0] # every time the first is the biggst, and add it directly\n", + " keep.append(i) # 保留该类剩余box中得分最高的一个\n", + " # 得到相交区域,左上及右下\n", + " xx1 = np.maximum(x1[i], x1[order[1:]])\n", + " yy1 = np.maximum(y1[i], y1[order[1:]])\n", + " xx2 = np.minimum(x2[i], x2[order[1:]])\n", + " yy2 = np.minimum(y2[i], y2[order[1:]])\n", + "\n", + " # 计算相交的面积,不重叠时面积为0\n", + " w = np.maximum(0, xx2-xx1+1) # the weights of overlap\n", + " h = np.maximum(0, yy2-yy1+1) # the height of overlap\n", + " inter = w*h\n", + " # 计算IoU:重叠面积 /(面积1+面积2-重叠面积)\n", + " ovr = inter / (areas[i]+areas[order[1:]] - inter)\n", + " # 保留IoU小于阈值的box\n", + " indx = np.where(ovr<=thresh)[0]\n", + " order = order[indx+1] # 因为ovr数组的长度比order数组少一个,所以这里要将所有下标后移一位\n", + "\n", + " return keep\n", + " \n", + " \n", + "\n", + "def plot_bbox(dets, c='k'):\n", + " x1 = dets[:,0]\n", + " y1 = dets[:,1]\n", + " x2 = dets[:,2]\n", + " y2 = dets[:,3]\n", + " \n", + " plt.plot([x1,x2], [y1,y1], c)\n", + " plt.plot([x1,x1], [y1,y2], c)\n", + " plt.plot([x1,x2], [y2,y2], c)\n", + " plt.plot([x2,x2], [y1,y2], c)\n", + " #plt.title(\" nms\")\n", + " #plt.show()\n", + "\n", + "plt.figure(1)\n", + "ax1 = plt.subplot(1,2,1)\n", + "ax1.set_title('before nms')\n", + "ax2 = plt.subplot(1,2,2)\n", + "ax2.set_title('after nms')\n", + " \n", + "plt.sca(ax1)\n", + "plot_bbox(boxes,'k') # before nms\n", + "\n", + "keep = py_cpu_nms(boxes, thresh=0.7)\n", + "plt.sca(ax2)\n", + "plot_bbox(boxes[keep], 'b')# after nms\n", + "plt.show()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 计算模型的参数量和计算量" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "torch.Size([1, 32, 320, 320])" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from torchstat import stat\n", + "import torchvision.models as models\n", + "import torch.nn as nn\n", + "import torch\n", + "\n", + "# With square kernels and equal stride\n", + "m = nn.Conv2d(16, 33, 3, stride=2)\n", + "# non-square kernels and unequal stride and with padding\n", + "m = nn.Conv2d(12, 32, (3, 3), stride=(1, 1), padding=(1, 1))\n", + "# non-square kernels and unequal stride and with padding and dilation\n", + "# m = nn.Conv2d(12, 32, (3, 3), stride=(1, 1), padding=(1, 1), dilation=(3, 1))\n", + "input = torch.randn(1, 12, 320, 320)\n", + "output = m(input)\n", + "output.shape" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from torchstat import stat\n", + "import torchvision.models as models\n", + "import torch\n", + "from torchinfo import summary\n", + "\n", + "model = 
models.vgg16()\n", + "\n", + "# stat(model, (3, 224, 224))\n", + "# summary(model, input_size=(1, 3, 64, 64))" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([[[[1., 1., 2., 2.],\n", + " [1., 1., 2., 2.],\n", + " [3., 3., 4., 4.],\n", + " [3., 3., 4., 4.]],\n", + "\n", + " [[5., 5., 6., 6.],\n", + " [5., 5., 6., 6.],\n", + " [7., 7., 8., 8.],\n", + " [7., 7., 8., 8.]]]])" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import torch.nn as nn\n", + "import torch\n", + "\n", + "input = torch.arange(1, 9, dtype=torch.float32).view(1, 2, 2, 2)\n", + "\n", + "m = nn.Upsample(scale_factor=2, mode='nearest')\n", + "m(input)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "pt39", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.19" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/Inference.py b/Inference.py new file mode 100644 index 000000000..930d4ea03 --- /dev/null +++ b/Inference.py @@ -0,0 +1,154 @@ +import argparse +import os + +import cv2 +import numpy as np +import torch + +from loguru import logger +import time +import onnxruntime +import torchvision.transforms as transforms + +from yolox.data.data_augment import preproc as preprocess +from yolox.utils import mkdir, multiclass_nms, demo_postprocess, vis +from PIL import Image + + +CLASSES = ( + 'people','car', +) + +def make_parser(): + parser = argparse.ArgumentParser("onnxruntime inference sample") + parser.add_argument( + "--model", + type=str, + default="/home/whoami/Documents/Hanvon/yoloxs_0528.onnx", + help="Input your onnx model.", + ) + parser.add_argument( + "--mode", + type=str, + default="video", + help="mode type, eg. image, video and webcam.", + ) + parser.add_argument( + "--input_path", + type=str, + default='/home/whoami/Videos/20230101_005748_vflip.MP4', + help="Path to your input image.", + ) + parser.add_argument( + "--camid", + type=int, + default=0, + help="webcam demo camera id", + ) + parser.add_argument( + "--output_path", + type=str, + default='outputs_videos', + help="Path to your output directory.", + ) + parser.add_argument( + "-s", + "--score_thr", + type=float, + default=0.3, + help="Score threshould to filter the result.", + ) + parser.add_argument( + "--input_shape", + type=str, + default="320,320", + help="Specify an input shape for inference.", + ) + parser.add_argument( + "--with_p6", + action="store_true", + help="Whether your model uses p6 in FPN/PAN.", + ) + return parser + + +def inference(args, origin_img): + t0 = time.time() + input_shape = tuple(map(int, args.input_shape.split(','))) + + img, ratio = preprocess(origin_img, input_shape) + session = onnxruntime.InferenceSession(args.model) + + ort_inputs = {session.get_inputs()[0].name: img[None, :, :, :]} + output = session.run(None, ort_inputs) + # print(output[0].shape) + predictions = demo_postprocess(output[0], input_shape, p6=args.with_p6)[0] + + boxes = predictions[:, :4] + scores = predictions[:, 4:5] * predictions[:, 5:] + + boxes_xyxy = np.ones_like(boxes) + boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2]/2. + boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3]/2. + boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2]/2. 
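+ # The four boxes_xyxy assignments around this comment convert YOLOX's (center_x, center_y, w, h) output to corner form (x1, y1, x2, y2); e.g. (cx=50, cy=50, w=20, h=10) becomes (40, 45, 60, 55). + # Dividing by `ratio` just below then undoes the preprocess() resize, mapping the boxes back to original-image pixels.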
+ boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3]/2. + boxes_xyxy /= ratio + dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.5, score_thr=0.5) + if dets is not None: + final_boxes, final_scores, final_cls_inds = dets[:, :4], dets[:, 4], dets[:, 5] + origin_img = vis(origin_img, final_boxes, final_scores, final_cls_inds, + conf=args.score_thr, class_names=CLASSES) + + logger.info("Infer time: {:.4f}s".format(time.time() - t0)) + return origin_img + +def image_process(args): + origin_img = cv2.imread(args.input_path) + origin_img = inference(args, origin_img) + mkdir(args.output_path) + output_path = os.path.join(args.output_path, args.input_path.split("/")[-1]) + logger.info("Saving detection result in {}".format(output_path)) + cv2.imwrite(output_path, origin_img) + +def imageflow_demo(args): + cap = cv2.VideoCapture(args.input_path if args.mode == "video" else args.camid) + width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) # float + height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) # float + fps = cap.get(cv2.CAP_PROP_FPS) + + mkdir(args.output_path) + current_time = time.localtime() + save_folder = os.path.join( + args.output_path, time.strftime("%Y_%m_%d_%H_%M_%S", current_time) + ) + os.makedirs(save_folder, exist_ok=True) + if args.mode == "video": + save_path = os.path.join(save_folder, args.input_path.split("/")[-1]) + else: + save_path = os.path.join(save_folder, "camera.mp4") + + logger.info(f"video save_path is {save_path}") + vid_writer = cv2.VideoWriter( + save_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (int(width), int(height)) + ) + while True: + ret_val, frame = cap.read() + if ret_val: + result_frame = inference(args, frame) + cv2.imshow('frame',result_frame) + vid_writer.write(result_frame) + ch = cv2.waitKey(1) + if ch == 27 or ch == ord("q") or ch == ord("Q"): + break + else: + break + +if __name__ == '__main__': + args = make_parser().parse_args() + if args.mode == "image": + image_process(args) + elif args.mode == "video" or args.mode == "webcam": + imageflow_demo(args) + + + \ No newline at end of file diff --git a/Inference2c.ipynb b/Inference2c.ipynb new file mode 100644 index 000000000..7b780f382 --- /dev/null +++ b/Inference2c.ipynb @@ -0,0 +1,110 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# [Loading a TorchScript Model in C++](https://pytorch.org/tutorials/advanced/cpp_export.html#loading-a-torchscript-model-in-c)" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "import torchvision\n", + "\n", + "# An instance of your model.\n", + "model = torchvision.models.resnet18()\n", + "\n", + "# An example input you would normally provide to your model's forward() method.\n", + "example = torch.rand(1, 3, 224, 224)\n", + "\n", + "# Use torch.jit.trace to generate a torch.jit.ScriptModule via tracing.\n", + "traced_script_module = torch.jit.trace(model, example)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([ 0.1552, 0.8421, -0.7267, -0.3862, 0.8198], grad_fn=)" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "output = traced_script_module(torch.ones(1, 3, 224, 224))\n", + "output[0, :5]" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "class MyModule(torch.nn.Module):\n", + " def __init__(self, N, M):\n", + " super(MyModule, self).__init__()\n", + " 
self.weight = torch.nn.Parameter(torch.rand(N, M))\n", + "\n", + " def forward(self, input):\n", + " if input.sum() > 0:\n", + " output = self.weight.mv(input)\n", + " else:\n", + " output = self.weight + input\n", + " return output\n", + "\n", + "my_module = MyModule(10,20)\n", + "sm = torch.jit.script(my_module)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "traced_script_module.save(\"traced_resnet_model.pt\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "pt39", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.19" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/Inference_img_0529.py b/Inference_img_0529.py new file mode 100644 index 000000000..18bd6c2c5 --- /dev/null +++ b/Inference_img_0529.py @@ -0,0 +1,202 @@ +import argparse +import os + +import cv2 +import numpy as np +import torch + +from loguru import logger +import time +import onnxruntime +import torchvision.transforms as transforms + +from yolox.data.data_augment import preproc as preprocess +from yolox.utils import mkdir, multiclass_nms, demo_postprocess, vis +from PIL import Image +import copy +np.set_printoptions(threshold=np.inf) + +CLASSES = ( + 'people','car' +) + +def make_parser(): + parser = argparse.ArgumentParser("onnxruntime inference sample") + parser.add_argument( + "--model", + type=str, + default="/home/whoami/Documents/Hanvon/yoloxs0413_320.onnx", + help="Input your onnx model.", + ) + parser.add_argument( + "--mode", + type=str, + default="image", + help="mode type, eg. 
image, video and webcam.", + ) + parser.add_argument( + "--input_path", + type=str, + default='assets/001.jpg', # 'outputs_imgs/crop_001/cropped_image_67.jpg' + help="Path to your input image.", + )# 'assets/test_frames/001.jpg' # 'outputs_imgs/crop_002/cropped_image_37.jpg' + parser.add_argument( + "--camid", + type=int, + default=0, + help="webcam demo camera id", + ) + parser.add_argument( + "--output_path", + type=str, + default='outputs_imgs', + help="Path to your output directory.", + ) + parser.add_argument( + "-s", + "--score_thr", + type=float, + default=0.3, + help="Score threshould to filter the result.", + ) + parser.add_argument( + "--input_shape", + type=str, + default="320,320", + help="Specify an input shape for inference.", + ) + parser.add_argument( + "--with_p6", + action="store_true", + help="Whether your model uses p6 in FPN/PAN.", + ) + return parser + + +def inference(args, origin_img): + t0 = time.time() + input_shape = tuple(map(int, args.input_shape.split(','))) + + img, ratio = preprocess(origin_img, input_shape) + + session = onnxruntime.InferenceSession(args.model) + + ort_inputs = {session.get_inputs()[0].name: img[None, :, :, :]} + output = session.run(None, ort_inputs) + print(output[0].shape) + predictions = demo_postprocess(output[0], input_shape, p6=args.with_p6)[0] + + return predictions, ratio + +def image_process(args): + frame = cv2.imread(args.input_path) + # 定义切片参数 + slice_size = (940, 940) + overlap = 0 + + # 对图像进行切片 + count = 0 + total_time = 0 + height, width = frame.shape[:2] + + stride = int(slice_size[0] * (1 - overlap)) + copied_frame = copy.deepcopy(frame) + + for y in range(0, height, stride): + for x in range(0, width, stride): + t0 = time.time() + + box = (x, y) + + if x + slice_size[0] <= width and y + slice_size[1] <= height: + slice_img = copied_frame[y:y + slice_size[1], x:x + slice_size[0]] + elif x + slice_size[0] > width and y + slice_size[1] <= height: + slice_img = copied_frame[y:y + slice_size[1], width-slice_size[0]:width] + box = (width-slice_size[0], y) + elif x + slice_size[0] <= width and y + slice_size[1] > height: + slice_img = copied_frame[height-slice_size[1]:height, x:x + slice_size[0]] + box = (x, height-slice_size[1]) + elif x + slice_size[0] > width and y + slice_size[1] > height: + slice_img = copied_frame[height-slice_size[1]:height, width-slice_size[0]:width] + box = (width-slice_size[0], height-slice_size[1]) + + + # print(f"{count} x, y coordinate are {x} and {y}") + + # cv2.imwrite('outputs_imgs/crop_003/cropped_image_{}.jpg'.format(count), slice_img, [cv2.IMWRITE_PNG_COMPRESSION, 100]) + # print("裁剪后的图像 {}.jpg 已保存。".format(count)) + # count += 1 + + # 对每个切片进行目标检测 + input_shape = tuple(map(int, args.input_shape.split(','))) + r = input_shape[0] / slice_size[0] + print("Scaling ratio is ", r) + pred, r = inference(args, slice_img) + if pred is not None: + boxes = pred[:, :4] + scores = pred[:, 4:5] * pred[:, 5:] + boxes_xyxy = np.ones_like(boxes) + boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2]/2. + boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3]/2. + boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2]/2. + boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3]/2. 
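+ # Tiled inference: each slice is detected independently, so the boxes below are first rescaled by the preprocessing ratio r (network input -> slice pixels) and then shifted by the slice origin (box[0], box[1]), putting detections from every tile into full-frame coordinates before NMS.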
+ boxes_xyxy /= r + boxes_xyxy[:, :4] += [box[0], box[1], box[0], box[1]] + + dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.45, score_thr=0.4) + if dets is not None: + final_boxes, final_scores, final_cls_inds = dets[:, :4], dets[:, 4], dets[:, 5] + frame = vis(frame, final_boxes, final_scores, final_cls_inds, + conf=args.score_thr, class_names=CLASSES) + + + logger.info("Infer time: {:.4f}s".format(time.time() - t0)) + total_time += (time.time() - t0) + print("Totoal inference time is ", total_time) + # 写入拼接后的图像帧 + mkdir(args.output_path) + output_path = os.path.join(args.output_path, args.input_path.split("/")[-1]) + logger.info("Saving detection result in {}".format(output_path)) + cv2.imwrite(output_path, frame) + +def imageflow_demo(args): + cap = cv2.VideoCapture(args.input_path if args.mode == "video" else args.camid) + width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) # float + height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) # float + fps = cap.get(cv2.CAP_PROP_FPS) + + mkdir(args.output_path) + current_time = time.localtime() + save_folder = os.path.join( + args.output_path, time.strftime("%Y_%m_%d_%H_%M_%S", current_time) + ) + os.makedirs(save_folder, exist_ok=True) + if args.mode == "video": + save_path = os.path.join(save_folder, args.input_path.split("/")[-1]) + else: + save_path = os.path.join(save_folder, "camera.mp4") + + logger.info(f"video save_path is {save_path}") + vid_writer = cv2.VideoWriter( + save_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (int(width), int(height)) + ) + while True: + ret_val, frame = cap.read() + if ret_val: + result_frame = inference(args, frame) + vid_writer.write(result_frame) + ch = cv2.waitKey(1) + if ch == 27 or ch == ord("q") or ch == ord("Q"): + break + else: + break + +if __name__ == '__main__': + args = make_parser().parse_args() + if args.mode == "image": + image_process(args) + elif args.mode == "video" or args.mode == "webcam": + imageflow_demo(args) + + + diff --git a/Inference_img_crop_0529.py b/Inference_img_crop_0529.py new file mode 100644 index 000000000..37fa2d9ef --- /dev/null +++ b/Inference_img_crop_0529.py @@ -0,0 +1,153 @@ +import argparse +import os + +import cv2 +import numpy as np +import torch + +from loguru import logger +import time +import onnxruntime +import torchvision.transforms as transforms + +from yolox.data.data_augment import preproc as preprocess +from yolox.utils import mkdir, multiclass_nms, demo_postprocess, vis +from PIL import Image + + +CLASSES = ( + 'people','car', +) + +def make_parser(): + parser = argparse.ArgumentParser("onnxruntime inference sample") + parser.add_argument( + "--model", + type=str, + default="/home/whoami/Documents/Hanvon/yoloxs0413_320.onnx", + help="Input your onnx model.", + ) + parser.add_argument( + "--mode", + type=str, + default="image", + help="mode type, eg. 
image, video and webcam.", + ) + parser.add_argument( + "--input_path", + type=str, + default='assets/001.jpg', + help="Path to your input image.", + ) + parser.add_argument( + "--camid", + type=int, + default=0, + help="webcam demo camera id", + ) + parser.add_argument( + "--output_path", + type=str, + default='outputs_imgs_crop', + help="Path to your output directory.", + ) + parser.add_argument( + "-s", + "--score_thr", + type=float, + default=0.3, + help="Score threshould to filter the result.", + ) + parser.add_argument( + "--input_shape", + type=str, + default="320,320", + help="Specify an input shape for inference.", + ) + parser.add_argument( + "--with_p6", + action="store_true", + help="Whether your model uses p6 in FPN/PAN.", + ) + return parser + + +def inference(args, origin_img): + t0 = time.time() + input_shape = tuple(map(int, args.input_shape.split(','))) + + img, ratio = preprocess(origin_img, input_shape) + session = onnxruntime.InferenceSession(args.model) + + ort_inputs = {session.get_inputs()[0].name: img[None, :, :, :]} + output = session.run(None, ort_inputs) + print(output[0].shape) + predictions = demo_postprocess(output[0], input_shape, p6=args.with_p6)[0] + + boxes = predictions[:, :4] + scores = predictions[:, 4:5] * predictions[:, 5:] + + boxes_xyxy = np.ones_like(boxes) + boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2]/2. + boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3]/2. + boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2]/2. + boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3]/2. + boxes_xyxy /= ratio + dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.45, score_thr=0.4) + if dets is not None: + final_boxes, final_scores, final_cls_inds = dets[:, :4], dets[:, 4], dets[:, 5] + origin_img = vis(origin_img, final_boxes, final_scores, final_cls_inds, + conf=args.score_thr, class_names=CLASSES) + + logger.info("Infer time: {:.4f}s".format(time.time() - t0)) + return origin_img + +def image_process(args): + origin_img = cv2.imread(args.input_path) + origin_img = inference(args, origin_img) + mkdir(args.output_path) + output_path = os.path.join(args.output_path, args.input_path.split("/")[-1]) + logger.info("Saving detection result in {}".format(output_path)) + cv2.imwrite(output_path, origin_img) + +def imageflow_demo(args): + cap = cv2.VideoCapture(args.input_path if args.mode == "video" else args.camid) + width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) # float + height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) # float + fps = cap.get(cv2.CAP_PROP_FPS) + + mkdir(args.output_path) + current_time = time.localtime() + save_folder = os.path.join( + args.output_path, time.strftime("%Y_%m_%d_%H_%M_%S", current_time) + ) + os.makedirs(save_folder, exist_ok=True) + if args.mode == "video": + save_path = os.path.join(save_folder, args.input_path.split("/")[-1]) + else: + save_path = os.path.join(save_folder, "camera.mp4") + + logger.info(f"video save_path is {save_path}") + vid_writer = cv2.VideoWriter( + save_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (int(width), int(height)) + ) + while True: + ret_val, frame = cap.read() + if ret_val: + result_frame = inference(args, frame) + vid_writer.write(result_frame) + ch = cv2.waitKey(1) + if ch == 27 or ch == ord("q") or ch == ord("Q"): + break + else: + break + +if __name__ == '__main__': + args = make_parser().parse_args() + if args.mode == "image": + image_process(args) + elif args.mode == "video" or args.mode == "webcam": + imageflow_demo(args) + + + \ No newline at end of file diff --git a/Inference_same_image.py b/Inference_same_image.py new 
file mode 100644 index 000000000..da5bb0f2d --- /dev/null +++ b/Inference_same_image.py @@ -0,0 +1,210 @@ +import argparse +import os + +import cv2 +import numpy as np +import torch + +from loguru import logger +import time +import onnxruntime +import torchvision.transforms as transforms + +from yolox.data.data_augment import preproc as preprocess +from yolox.utils import mkdir, multiclass_nms, demo_postprocess, vis +from PIL import Image +import copy +np.set_printoptions(threshold=np.inf) + +# ❯ python Inference_same_image.py +# 2024-07-05 10:40:41.962 | INFO | __main__:inference:88 - Infer time: 0.0643s +# ratio is 0.16666666666666666 +# 2024-07-05 10:40:41.968 | INFO | __main__:image_process:151 - Infer time: 0.1445s +# 2024-07-05 10:40:41.968 | INFO | __main__:image_process:156 - Saving detection result in outputs_imgs/001.jpg + +CLASSES = ( + 'people','car' +) + +def make_parser(): + parser = argparse.ArgumentParser("onnxruntime inference sample") + parser.add_argument( + "--model", + type=str, + default="/home/whoami/Documents/Hanvon/yolox_s.onnx", + help="Input your onnx model.", + ) + parser.add_argument( + "--mode", + type=str, + default="image", + help="mode type, eg. image, video and webcam.", + ) + parser.add_argument( + "--input_path", + type=str, + default='assets/001.jpg', + help="Path to your input image.", + ) + parser.add_argument( + "--camid", + type=int, + default=0, + help="webcam demo camera id", + ) + parser.add_argument( + "--output_path", + type=str, + default='outputs_imgs/', + help="Path to your output directory.", + ) + parser.add_argument( + "-s", + "--score_thr", + type=float, + default=0.3, + help="Score threshould to filter the result.", + ) + parser.add_argument( + "--input_shape", + type=str, + default="640,640", + help="Specify an input shape for inference.", + ) + parser.add_argument( + "--with_p6", + action="store_true", + help="Whether your model uses p6 in FPN/PAN.", + ) + return parser + + +def inference(args, origin_img): + + input_shape = tuple(map(int, args.input_shape.split(','))) + + img, ratio = preprocess(origin_img, input_shape) + + session = onnxruntime.InferenceSession(args.model) + + ort_inputs = {session.get_inputs()[0].name: img[None, :, :, :]} + + t0 = time.time() + output = session.run(None, ort_inputs) + logger.info("Infer time: {:.4f}s".format(time.time() - t0)) + + predictions = demo_postprocess(output[0], input_shape, p6=args.with_p6)[0] + + + return predictions, ratio + +def image_process(args): + origin_img = cv2.imread(args.input_path) + slice_size = (3840, 2160) + height, width = origin_img.shape[:2] + overlap = 0 + stride = int(slice_size[0] * (1 - overlap)) + count = 0 + total_time = 0 + copied_frame = copy.deepcopy(origin_img) + for y in range(0, height, stride): + for x in range(0, width, stride): + t0 = time.time() + + box = (x, y) + + if x + slice_size[0] <= width and y + slice_size[1] <= height: + slice_img = copied_frame[y:y + slice_size[1], x:x + slice_size[0]] + elif x + slice_size[0] > width and y + slice_size[1] <= height: + slice_img = copied_frame[y:y + slice_size[1], width-slice_size[0]:width] + box = (width-slice_size[0], y) + elif x + slice_size[0] <= width and y + slice_size[1] > height: + slice_img = copied_frame[height-slice_size[1]:height, x:x + slice_size[0]] + box = (x, height-slice_size[1]) + elif x + slice_size[0] > width and y + slice_size[1] > height: + slice_img = copied_frame[height-slice_size[1]:height, width-slice_size[0]:width] + box = (width-slice_size[0], height-slice_size[1]) + + + + pred, 
ratio = inference(args, slice_img) + print("ratio is ", ratio) + boxes = pred[:, :4] + scores = pred[:, 4:5] * pred[:, 5:] + + # print(f"Boxes are {pred[:, :4].shape}") + # print(f"scores1 are {pred[:, 4:5].shape}") + # print(f"scores2 are {pred[:, 5:].shape}") + # print(f"scores are ", scores.shape) + + boxes_xyxy = np.ones_like(boxes) + boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2]/2. + boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3]/2. + boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2]/2. + boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3]/2. + boxes_xyxy /= ratio + dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.5, score_thr=0.5) + if dets is not None: + final_boxes, final_scores, final_cls_inds = dets[:, :4], dets[:, 4], dets[:, 5] + slice_img = vis(slice_img, final_boxes, final_scores, final_cls_inds, + conf=args.score_thr, class_names=CLASSES) + + # 写入原图 + final_boxes[:, :4] += [box[0], box[1], box[0], box[1]] + origin_img = vis(origin_img, final_boxes, final_scores, final_cls_inds, + conf=args.score_thr, class_names=CLASSES) + + logger.info("Infer time: {:.4f}s".format(time.time() - t0)) + total_time += time.time() - t0 + + mkdir(args.output_path) + output_path = os.path.join(args.output_path, args.input_path.split("/")[-1]) + logger.info("Saving detection result in {}".format(output_path)) + cv2.imwrite('outputs_imgs/crop_001/cropped_image_{}.jpg'.format(count), slice_img) + count += 1 + + cv2.imwrite(output_path, origin_img) + print(f"Total used time is {total_time}s") + + +def imageflow_demo(args): + cap = cv2.VideoCapture(args.input_path if args.mode == "video" else args.camid) + width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) # float + height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) # float + fps = cap.get(cv2.CAP_PROP_FPS) + + mkdir(args.output_path) + current_time = time.localtime() + save_folder = os.path.join( + args.output_path, time.strftime("%Y_%m_%d_%H_%M_%S", current_time) + ) + os.makedirs(save_folder, exist_ok=True) + if args.mode == "video": + save_path = os.path.join(save_folder, args.input_path.split("/")[-1]) + else: + save_path = os.path.join(save_folder, "camera.mp4") + + logger.info(f"video save_path is {save_path}") + vid_writer = cv2.VideoWriter( + save_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (int(width), int(height)) + ) + while True: + ret_val, frame = cap.read() + if ret_val: + result_frame = inference(args, frame) + vid_writer.write(result_frame) + ch = cv2.waitKey(1) + if ch == 27 or ch == ord("q") or ch == ord("Q"): + break + else: + break + +if __name__ == '__main__': + args = make_parser().parse_args() + if args.mode == "image": + image_process(args) + elif args.mode == "video" or args.mode == "webcam": + imageflow_demo(args) + + + \ No newline at end of file diff --git a/Inference_specific_stride.py b/Inference_specific_stride.py new file mode 100644 index 000000000..f7d576cff --- /dev/null +++ b/Inference_specific_stride.py @@ -0,0 +1,194 @@ +import argparse +import os + +import cv2 +import numpy as np +import torch + +from loguru import logger +import time +import onnxruntime +import torchvision.transforms as transforms + +from yolox.data.data_augment import preproc as preprocess +from yolox.utils import mkdir, multiclass_nms, demo_postprocess, vis +from PIL import Image +import copy +np.set_printoptions(threshold=np.inf) + +CLASSES = ( + 'people','car' +) + +def make_parser(): + parser = argparse.ArgumentParser("onnxruntime inference sample") + parser.add_argument( + "--model", + type=str, + default="/home/whoami/Documents/Hanvon/yoloxs_0528.onnx", + 
help="Input your onnx model.", + ) + parser.add_argument( + "--mode", + type=str, + default="image", + help="mode type, eg. image, video and webcam.", + ) + parser.add_argument( + "--input_path", + type=str, + default='assets/xxx.png', + help="Path to your input image.", + ) + parser.add_argument( + "--camid", + type=int, + default=0, + help="webcam demo camera id", + ) + parser.add_argument( + "--output_path", + type=str, + default='outputs_imgs/total0616', + help="Path to your output directory.", + ) + parser.add_argument( + "-s", + "--score_thr", + type=float, + default=0.3, + help="Score threshould to filter the result.", + ) + parser.add_argument( + "--input_shape", + type=str, + default="320,320", + help="Specify an input shape for inference.", + ) + parser.add_argument( + "--with_p6", + action="store_true", + help="Whether your model uses p6 in FPN/PAN.", + ) + return parser + + +def inference(args, origin_img): + t0 = time.time() + input_shape = tuple(map(int, args.input_shape.split(','))) + + img, ratio = preprocess(origin_img, input_shape) + + session = onnxruntime.InferenceSession(args.model) + + ort_inputs = {session.get_inputs()[0].name: img[None, :, :, :]} + output = session.run(None, ort_inputs) + + predictions = demo_postprocess(output[0], input_shape, p6=args.with_p6)[0] + + + return predictions, ratio + +def image_process(args): + folder_path = '/home/whoami/Documents/Hanvon/Datasets/20230101_002049/' + t0 = time.time() + for filename in os.listdir(folder_path): + img_path = os.path.join(folder_path, filename) + origin_img = cv2.imread(img_path) + print(f"This is {filename}") + + slice_size = (960, 960) + + count = 0 + total_time = 0 + copied_frame = origin_img.copy() + + # for y in [0, 300, 600]: + # for x in [0, 360, 720, 1080, 1440]: + # for y in [0, 300, 600, 900, 1200, 1500, 1680]: + # for x in [0, 360, 720, 1080, 1440, 1800, 2160, 2520, 2880, 3240, 3360]: + # for y in [0, 440]: + # for x in [0, 640, 1280]: + t1 = time.time() + for y in [0, 600, 1200]: + for x in [0, 720, 1440, 2160, 2880]: + + box = (x, y) + # print(f"box is {box}") + + slice_img = copied_frame[y:y + slice_size[1], x:x + slice_size[0]].copy() + + pred, ratio = inference(args, slice_img) + # print("ratio is ", ratio) + boxes = pred[:, :4] + scores = pred[:, 4:5] * pred[:, 5:] + + boxes_xyxy = np.ones_like(boxes) + boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2]/2. + boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3]/2. + boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2]/2. + boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3]/2. 
+                boxes_xyxy /= ratio
+                dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.5, score_thr=0.6)
+                if dets is not None:
+                    final_boxes, final_scores, final_cls_inds = dets[:, :4], dets[:, 4], dets[:, 5]
+                    slice_img = vis(slice_img, final_boxes, final_scores, final_cls_inds,
+                                    conf=args.score_thr, class_names=CLASSES)
+
+                    # Map the boxes back to the original image and draw them there too.
+                    final_boxes[:, :4] += [box[0], box[1], box[0], box[1]]
+                    origin_img = vis(origin_img, final_boxes, final_scores, final_cls_inds,
+                                     conf=args.score_thr, class_names=CLASSES)
+
+        logger.info("{} costs infer time: {:.4f}s".format(filename, time.time() - t1))
+
+        if not os.path.exists(args.output_path):
+            os.mkdir(args.output_path)
+        output_path = os.path.join(args.output_path, filename)
+        cv2.imwrite(output_path, origin_img)
+
+    logger.info("Total Infer time: {:.4f}s".format(time.time() - t0))
+
+
+def imageflow_demo(args):
+    cap = cv2.VideoCapture(args.input_path if args.mode == "video" else args.camid)
+    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)  # float
+    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float
+    fps = cap.get(cv2.CAP_PROP_FPS)
+
+    mkdir(args.output_path)
+    current_time = time.localtime()
+    save_folder = os.path.join(
+        args.output_path, time.strftime("%Y_%m_%d_%H_%M_%S", current_time)
+    )
+    os.makedirs(save_folder, exist_ok=True)
+    if args.mode == "video":
+        save_path = os.path.join(save_folder, args.input_path.split("/")[-1])
+    else:
+        save_path = os.path.join(save_folder, "camera.mp4")
+
+    logger.info(f"video save_path is {save_path}")
+    vid_writer = cv2.VideoWriter(
+        save_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (int(width), int(height))
+    )
+    while True:
+        ret_val, frame = cap.read()
+        if ret_val:
+            # inference() returns (predictions, ratio), not an annotated frame;
+            # decode as in image_process() before drawing. Until then, write
+            # the raw frame so the writer receives a valid image.
+            pred, ratio = inference(args, frame)
+            vid_writer.write(frame)
+            ch = cv2.waitKey(1)
+            if ch == 27 or ch == ord("q") or ch == ord("Q"):
+                break
+        else:
+            break
+
+if __name__ == '__main__':
+    args = make_parser().parse_args()
+    if args.mode == "image":
+        image_process(args)
+    elif args.mode == "video" or args.mode == "webcam":
+        imageflow_demo(args)
\ No newline at end of file
diff --git a/Inference_specific_stride_one.py b/Inference_specific_stride_one.py
new file mode 100644
index 000000000..5f757bcb0
--- /dev/null
+++ b/Inference_specific_stride_one.py
@@ -0,0 +1,201 @@
+import argparse
+import os
+
+import cv2
+import numpy as np
+import torch
+
+from loguru import logger
+import time
+import onnxruntime
+import torchvision.transforms as transforms
+
+from yolox.data.data_augment import preproc as preprocess
+from yolox.utils import mkdir, multiclass_nms, demo_postprocess, vis
+from PIL import Image
+import copy
+np.set_printoptions(threshold=np.inf)
+
+CLASSES = (
+    'people', 'car'
+)
+
+def make_parser():
+    parser = argparse.ArgumentParser("onnxruntime inference sample")
+    parser.add_argument(
+        "--model",
+        type=str,
+        default="/home/whoami/Documents/Hanvon/yoloxs_0528.onnx",
+        help="Input your onnx model.",
+    )
+    parser.add_argument(
+        "--mode",
+        type=str,
+        default="image",
+        help="mode type, e.g. image, video or webcam.",
+    )
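+    # The path defaults below point at the author's local files; override
+    # them on the command line.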
image, video and webcam.", + ) + parser.add_argument( + "--input_path", + type=str, + default='/home/whoami/Documents/Hanvon/云台侦查/pics0607/frame_27.png', + help="Path to your input image.", + ) + parser.add_argument( + "--camid", + type=int, + default=0, + help="webcam demo camera id", + ) + parser.add_argument( + "--output_path", + type=str, + default='outputs_imgs/', + help="Path to your output directory.", + ) + parser.add_argument( + "-s", + "--score_thr", + type=float, + default=0.3, + help="Score threshould to filter the result.", + ) + parser.add_argument( + "--input_shape", + type=str, + default="320,320", + help="Specify an input shape for inference.", + ) + parser.add_argument( + "--with_p6", + action="store_true", + help="Whether your model uses p6 in FPN/PAN.", + ) + return parser + + +def inference(args, origin_img): + t0 = time.time() + input_shape = tuple(map(int, args.input_shape.split(','))) + + img, ratio = preprocess(origin_img, input_shape) + + session = onnxruntime.InferenceSession(args.model) + + ort_inputs = {session.get_inputs()[0].name: img[None, :, :, :]} + output = session.run(None, ort_inputs) + + predictions = demo_postprocess(output[0], input_shape, p6=args.with_p6)[0] + + + return predictions, ratio + +def image_process(args): + origin_img = cv2.imread(args.input_path) + + slice_size = (1080, 1080) + + count = 0 + total_time = 0 + copied_frame = origin_img.copy()# copy.deepcopy(origin_img) + output_path = os.path.join(args.output_path, args.input_path.split("/")[-1]) + + crop_path = 'outputs_imgs/crop_000' + if not os.path.exists(crop_path): + os.makedirs(crop_path) + + + # for y in [0, 300, 600, 900, 1200, 1500, 1680]: + # for x in [0, 360, 720, 1080, 1440, 1800, 2160, 2520, 2880, 3240, 3360]: + # for y in [0, 300, 600]: + # for x in [0, 360, 720, 1080, 1440]: + # for y in [0, 440]: + # for x in [0, 640, 1280]: + for y in [0]: + for x in [0, 840]: + t0 = time.time() + + box = (x, y) + print(f"box is {box}") + + slice_img = copied_frame[y:y + slice_size[1], x:x + slice_size[0]].copy() + + pred, ratio = inference(args, slice_img) + + print(f"ratio is {ratio}") + + boxes = pred[:, :4] + scores = pred[:, 4:5] * pred[:, 5:] + + boxes_xyxy = np.ones_like(boxes) + boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2]/2. + boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3]/2. + boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2]/2. + boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3]/2. 
+            boxes_xyxy /= ratio
+            dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.5, score_thr=0.5)
+            if dets is not None:
+                final_boxes, final_scores, final_cls_inds = dets[:, :4], dets[:, 4], dets[:, 5]
+                slice_img = vis(slice_img, final_boxes, final_scores, final_cls_inds,
+                                conf=args.score_thr, class_names=CLASSES)
+
+                # Map the boxes back to the original image and draw them there too.
+                final_boxes[:, :4] += [box[0], box[1], box[0], box[1]]
+                origin_img = vis(origin_img, final_boxes, final_scores, final_cls_inds,
+                                 conf=args.score_thr, class_names=CLASSES)
+
+            cv2.imwrite(os.path.join(crop_path, f"cropped_image_{count}.jpg"), slice_img)
+            count += 1
+
+            logger.info("Infer time: {:.4f}s".format(time.time() - t0))
+            total_time += time.time() - t0
+
+    mkdir(args.output_path)
+    cv2.imwrite(output_path, origin_img)
+    print(f"Total used time is {total_time}s")
+
+
+def imageflow_demo(args):
+    cap = cv2.VideoCapture(args.input_path if args.mode == "video" else args.camid)
+    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)  # float
+    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float
+    fps = cap.get(cv2.CAP_PROP_FPS)
+
+    mkdir(args.output_path)
+    current_time = time.localtime()
+    save_folder = os.path.join(
+        args.output_path, time.strftime("%Y_%m_%d_%H_%M_%S", current_time)
+    )
+    os.makedirs(save_folder, exist_ok=True)
+    if args.mode == "video":
+        save_path = os.path.join(save_folder, args.input_path.split("/")[-1])
+    else:
+        save_path = os.path.join(save_folder, "camera.mp4")
+
+    logger.info(f"video save_path is {save_path}")
+    vid_writer = cv2.VideoWriter(
+        save_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (int(width), int(height))
+    )
+    while True:
+        ret_val, frame = cap.read()
+        if ret_val:
+            # inference() returns (predictions, ratio), not an annotated frame;
+            # decode as in image_process() before drawing. Until then, write
+            # the raw frame so the writer receives a valid image.
+            pred, ratio = inference(args, frame)
+            vid_writer.write(frame)
+            ch = cv2.waitKey(1)
+            if ch == 27 or ch == ord("q") or ch == ord("Q"):
+                break
+        else:
+            break
+
+if __name__ == '__main__':
+    args = make_parser().parse_args()
+    if args.mode == "image":
+        image_process(args)
+    elif args.mode == "video" or args.mode == "webcam":
+        imageflow_demo(args)
\ No newline at end of file
diff --git a/Inference_video_0529.py b/Inference_video_0529.py
new file mode 100644
index 000000000..9b8188fbb
--- /dev/null
+++ b/Inference_video_0529.py
@@ -0,0 +1,324 @@
+import argparse
+import os
+
+import cv2
+import numpy as np
+import torch
+
+from loguru import logger
+import time
+import onnxruntime
+import torchvision.transforms as transforms
+
+from yolox.data.data_augment import preproc as preprocess
+from yolox.utils import mkdir, multiclass_nms, demo_postprocess, vis
+from PIL import Image
+
+CLASSES = (
+    'people', 'car'
+)
+
+def make_parser():
+    parser = argparse.ArgumentParser("onnxruntime inference sample")
+    parser.add_argument(
+        "--model",
+        type=str,
+        default="/home/whoami/Documents/Hanvon/yoloxs0413_320.onnx",
+        help="Input your onnx model.",
+    )
+    parser.add_argument(
+        "--mode",
+        type=str,
+        default="video",
+        help="mode type, e.g. image, video or webcam.",
+    )
image, video and webcam.", + ) + parser.add_argument( + "--input_path", + type=str, + default='/home/whoami/Documents/Hanvon/13-2/20230101_000613_vflip.MP4', + help="Path to your input image/video/webcam.", + ) + parser.add_argument( + "--camid", + type=int, + default=0, + help="webcam demo camera id", + ) + parser.add_argument( + "--output_path", + type=str, + default='outputs_videos', + help="Path to your output directory.", + ) + parser.add_argument( + "-s", + "--score_thr", + type=float, + default=0.5, + help="Score threshould to filter the result.", + ) + parser.add_argument( + "--input_shape", + type=str, + default="320,320", + help="Specify an input shape for inference.", + ) + parser.add_argument( + "--with_p6", + action="store_true", + help="Whether your model uses p6 in FPN/PAN.", + ) + return parser + + +def inference(args, origin_img): + # t0 = time.time() + input_shape = tuple(map(int, args.input_shape.split(','))) + + img, ratio = preprocess(origin_img, input_shape) + + session = onnxruntime.InferenceSession(args.model) + + # ort_inputs = {session.get_inputs()[0].name: img[None, :, :, :]} + ort_inputs = {session.get_inputs()[0].name: img[None, :, :, :]} + output = session.run(None, ort_inputs) + # print(output[0].shape) # (1, 2100, 7) + predictions = demo_postprocess(output[0], input_shape, p6=args.with_p6)[0] + + return predictions, ratio + +def image_process(args): + origin_img = cv2.imread(args.input_path) + origin_img = inference(args, origin_img) + mkdir(args.output_path) + output_path = os.path.join(args.output_path, args.input_path.split("/")[-1]) + logger.info("Saving detection result in {}".format(output_path)) + cv2.imwrite(output_path, origin_img) + +def imageflow_demo(args): + cap = cv2.VideoCapture(args.input_path if args.mode == "video" else args.camid) + width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) # float + height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) # float + fps = cap.get(cv2.CAP_PROP_FPS) + + mkdir(args.output_path) + current_time = time.localtime() + save_folder = os.path.join( + args.output_path, time.strftime("%Y_%m_%d_%H_%M_%S", current_time) + ) + os.makedirs(save_folder, exist_ok=True) + if args.mode == "video": + save_path = os.path.join(save_folder, args.input_path.split("/")[-1]) + else: + save_path = os.path.join(save_folder, "camera.mp4") + + logger.info(f"video save_path is {save_path}") + vid_writer = cv2.VideoWriter( + save_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (int(width), int(height)) + ) + while True: + ret_val, frame = cap.read() + + # 定义切片参数 + slice_size = (1080, 1080) + + overlap = 0 + + # 对图像进行切片 + copied_frame = frame.copy() + stride = int(slice_size[0] * (1 - overlap)) + slices = [] + # for y in range(0, int(height), stride): + # for x in range(0, int(width), stride): + for y in [0]: + for x in [0, 840]: + box = (x, y) + slice_img = copied_frame[y:y + slice_size[1], x:x + slice_size[0]].copy() + + slices.append((slice_img, box)) # 保存切片及其在原图中的位置信息 + + # 对每个切片进行目标检测 + if ret_val: + t0 = time.time() + results = [] + for slice_img, box in slices: + # slice_tensor = transform(slice_img).unsqueeze(0).cuda() + pred, ratio = inference(args, slice_img) + results.append((pred, box)) + + # 合并检测结果并映射回原始图像的坐标空间 + for preds, box in results: + if preds is not None: + + boxes = preds[:, :4] + scores = preds[:, 4:5] * preds[:, 5:] + + boxes_xyxy = np.ones_like(boxes) + boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2]/2. + boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3]/2. + boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2]/2. 
+                boxes_xyxy /= ratio
+                boxes_xyxy[:, :4] += [box[0], box[1], box[0], box[1]]  # map back to original-image coordinates
+
+                dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.45, score_thr=0.45)
+                if dets is not None:
+                    final_boxes, final_scores, final_cls_inds = dets[:, :4], dets[:, 4], dets[:, 5]
+                    frame = vis(frame, final_boxes, final_scores, final_cls_inds,
+                                conf=args.score_thr, class_names=CLASSES)
+
+        # Log only the elapsed time (formatting the frame itself would dump
+        # the whole pixel array into the log).
+        logger.info("frame costs: {:.4f}s".format(time.time() - t0))
+
+        cv2.imshow('frame', frame)
+        # Write the stitched, annotated frame.
+        vid_writer.write(frame)
+        ch = cv2.waitKey(1)
+        if ch == 27 or ch == ord("q") or ch == ord("Q"):
+            break
+
+
+if __name__ == '__main__':
+    args = make_parser().parse_args()
+    if args.mode == "image":
+        image_process(args)
+    elif args.mode == "video" or args.mode == "webcam":
+        imageflow_demo(args)
+
+
+# import os, sys
+
+# sys.path.append(os.getcwd())
+# import onnxruntime
+# import torchvision.models as models
+# import torchvision.transforms as transforms
+# from PIL import Image
+
+
+# def to_numpy(tensor):
+#     return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
+
+# # Custom preprocessing transform
+# def get_test_transform():
+#     return transforms.Compose([
+#         transforms.Resize([320, 320]),
+#         transforms.ToTensor(),
+#         transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+#     ])
+
+# # Image to run inference on
+# image = Image.open('assets/car_and_person.png').convert('RGB')
+
+# img = get_test_transform()(image)
+# img = img.unsqueeze_(0)  # -> NCHW, 1,3,320,320
+# # Load the model
+# onnx_model_path = "/home/whoami/Documents/Hanvon/yoloxs0413_320.onnx"
+# resnet_session = onnxruntime.InferenceSession(onnx_model_path)
+# inputs = {resnet_session.get_inputs()[0].name: to_numpy(img)}
+# outs = resnet_session.run(None, inputs)[0]
+# print("onnx weights", outs)
+# print("onnx prediction", outs.argmax(axis=1)[0])
+
+
+
+
+# import torch
+# from torchviz import make_dot
+# import onnx
+# import torchvision.transforms as transforms
+# from PIL import Image
+
+# # Custom preprocessing transform
+# def get_test_transform():
+#     return transforms.Compose([
+#         transforms.Resize([320, 320]),
+#         transforms.ToTensor(),
+#         transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+#     ])
+
+# # Image to run inference on
+# image = Image.open('assets/car_and_person.png').convert('RGB')
+
+# img = get_test_transform()(image)
+# img = img.unsqueeze_(0)  # -> NCHW, 1,3,320,320
+
+# # Load the ONNX model
+# onnx_model = onnx.load("/home/whoami/Documents/Hanvon/yoloxs0413_320.onnx")
+
+# # Import the ONNX model as a PyTorch model via torch.onnx
+# # NOTE: torch.onnx has no such import function; a third-party converter
+# # (e.g. onnx2torch) would be needed, so this block stays commented out.
+# pytorch_model = torch.onnx.imports(model=onnx_model, opset_version=11)
+
+# # Visualize the PyTorch model's computation graph; an example input is
+# # needed to trace it -- replace it with an input your model actually accepts.
+# # example_input = torch.randn(1, 3, 224, 224)  # assumes a (1, 3, 224, 224) input
+# dot = make_dot(pytorch_model(img), params=dict(pytorch_model.named_parameters()))
+# dot.render('pytorch_model', format='png')  # optionally save the graph as an image
+
+
+
+
+# import torch
+# import torch.onnx
+# import os
+# import torchvision.transforms as transforms
+# from PIL import Image
+# dependencies = ["torch"]
+# from yolox.models import (
+#     yolox_tiny,
+#     yolox_nano,
+#     yolox_s,
+#     yolox_m,
+#     yolox_l,
+#     yolox_x,
+#     yolov3,
+#     yolox_custom
+# )
+
+# # Custom preprocessing transform
+# def get_test_transform():
+#     return transforms.Compose([
+#         transforms.Resize([320, 320]),
+#         transforms.ToTensor(),
+#         transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+#     ])
+
+# # Image to run inference on
+# image = Image.open('assets/car_and_person.png').convert('RGB')
+
+# img = get_test_transform()(image)
+# img = img.unsqueeze_(0)  # -> NCHW, 1,3,320,320
+
+# def pth_to_onnx(input, checkpoint, onnx_path, input_names=['input'], output_names=['output'], device='cpu'):
+#     if not onnx_path.endswith('.onnx'):
+#         print('Warning! The onnx model name is not correct,\
+#             please give a name that ends with \'.onnx\'!')
+#         return 0
+
+#     model = torch.hub.load("Megvii-BaseDetection/YOLOX", "yolox_s")  # load the model definition
+#     model.load_state_dict(torch.load(checkpoint)["model"])  # initialize the weights
+#     model.eval()
+#     # model.to(device)
+
+#     torch.onnx.export(model, input, onnx_path, verbose=True, input_names=input_names, output_names=output_names)  # specify the model input and the ONNX output path
+#     print("Exporting the .pth model to ONNX succeeded!")
+
+# if __name__ == '__main__':
+
+#     checkpoint = '/home/whoami/Downloads/yolox_s.pth'
+#     onnx_path = '/home/whoami/Downloads/yolox_s.onnx'
+#     # device = torch.device("cuda:2" if torch.cuda.is_available() else 'cpu')
+#     pth_to_onnx(img.cuda(), checkpoint, onnx_path)
diff --git a/demo/OpenVINO/cpp/README.md b/demo/OpenVINO/cpp/README.md
index c877d94c2..1a51c3316 100644
--- a/demo/OpenVINO/cpp/README.md
+++ b/demo/OpenVINO/cpp/README.md
@@ -25,7 +25,7 @@ Please visit [Openvino Homepage](https://docs.openvinotoolkit.org/latest/get_sta
 **Option1. Set up the environment temporarily. You need to run this command every time you start a new shell window.**
 
 ```shell
-source /opt/intel/openvino_2021/bin/setupvars.sh
+source /opt/intel/openvino_2022/setupvars.sh
 ```
 
 **Option2. Set up the environment permanently.**
@@ -38,7 +38,7 @@ vim ~/.bashrc
 *Step2.* Add the following line into your file:
 
 ```shell
-source /opt/intel/openvino_2021/bin/setupvars.sh
+source /opt/intel/openvino_2022/setupvars.sh
 ```
 
 *Step3.* Save and exit the file, then run:
diff --git a/tools/demo.py b/tools/demo.py
index b16598d5f..008a2fd5b 100644
--- a/tools/demo.py
+++ b/tools/demo.py
@@ -11,6 +11,9 @@
 import torch
 
+import sys
+sys.path.append('/home/whoami/Documents/Hanvon/YOLOX')
+
 from yolox.data.data_augment import ValTransform
 from yolox.data.datasets import COCO_CLASSES
 from yolox.exp import get_exp
@@ -22,13 +25,13 @@ def make_parser():
     parser = argparse.ArgumentParser("YOLOX Demo!")
     parser.add_argument(
-        "demo", default="image", help="demo type, eg. image, video and webcam"
+        "--demo", default="image", help="demo type, e.g. image, video or webcam"
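+        # `demo` is now an option rather than a positional argument: invoke
+        # as `python tools/demo.py --demo image ...`.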
image, video and webcam" ) - parser.add_argument("-expn", "--experiment-name", type=str, default=None) - parser.add_argument("-n", "--name", type=str, default=None, help="model name") + parser.add_argument("-expn", "--experiment-name", type=str, default="yolox-s") + parser.add_argument("-n", "--name", type=str, default="yolox-s", help="model name") parser.add_argument( - "--path", default="./assets/dog.jpg", help="path to images or video" + "--path", default="./assets/001.jpg", help="path to images or video" ) parser.add_argument("--camid", type=int, default=0, help="webcam demo camera id") parser.add_argument( @@ -311,6 +314,11 @@ def main(exp, args): image_demo(predictor, vis_folder, args.path, current_time, args.save_result) elif args.demo == "video" or args.demo == "webcam": imageflow_demo(predictor, vis_folder, current_time, args) + + # print the model + # Print layers of the model + # for name, module in model.named_children(): + # print(name, module) if __name__ == "__main__": diff --git a/tools/export_onnx.py b/tools/export_onnx.py index 8703166a4..e0e447c85 100644 --- a/tools/export_onnx.py +++ b/tools/export_onnx.py @@ -9,6 +9,9 @@ import torch from torch import nn +import sys +sys.path.append('/home/whoami/Documents/Hanvon/YOLOX') + from yolox.exp import get_exp from yolox.models.network_blocks import SiLU from yolox.utils import replace_module @@ -17,7 +20,7 @@ def make_parser(): parser = argparse.ArgumentParser("YOLOX onnx deploy") parser.add_argument( - "--output-name", type=str, default="yolox.onnx", help="output name of models" + "--output-name", type=str, default="yolox-s.onnx", help="output name of models" ) parser.add_argument( "--input", default="images", type=str, help="input node name of onnx model" @@ -40,8 +43,8 @@ def make_parser(): type=str, help="experiment description file", ) - parser.add_argument("-expn", "--experiment-name", type=str, default=None) - parser.add_argument("-n", "--name", type=str, default=None, help="model name") + parser.add_argument("-expn", "--experiment-name", type=str, default="yolox-s") + parser.add_argument("-n", "--name", type=str, default="yolox-s", help="model name") parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt path") parser.add_argument( "opts", diff --git a/tools/visualize_assign.py b/tools/visualize_assign.py index e75a5586b..e5366980b 100644 --- a/tools/visualize_assign.py +++ b/tools/visualize_assign.py @@ -11,10 +11,12 @@ import torch import torch.backends.cudnn as cudnn +import sys +sys.path.append('/home/whoami/Documents/Hanvon/YOLOX') from yolox.exp import Exp, get_exp from yolox.core import Trainer from yolox.utils import configure_module, configure_omp -from yolox.tools.train import make_parser +from tools.train import make_parser class AssignVisualizer(Trainer): diff --git a/yolox/exp/build.py b/yolox/exp/build.py index ef83f76fa..dcc089d1a 100644 --- a/yolox/exp/build.py +++ b/yolox/exp/build.py @@ -31,7 +31,7 @@ def get_exp(exp_file=None, exp_name=None): Args: exp_file (str): file path of experiment. - exp_name (str): name of experiment. "yolo-s", + exp_name (str): name of experiment. 
"yolox-s", """ assert ( exp_file is not None or exp_name is not None diff --git a/yolox/utils/demo_utils.py b/yolox/utils/demo_utils.py index 56dd33686..6f3b63679 100644 --- a/yolox/utils/demo_utils.py +++ b/yolox/utils/demo_utils.py @@ -123,6 +123,18 @@ def multiclass_nms_class_agnostic(boxes, scores, nms_thr, score_thr): cls_scores = scores[np.arange(len(cls_inds)), cls_inds] valid_score_mask = cls_scores > score_thr + for i in range(2100): + if valid_score_mask[i] == True: + if cls_inds[i] == 1 and cls_scores[i] < 0.75: + valid_score_mask[i] = False + if cls_inds[i] == 1 and cls_scores[i] > 0.75: + if boxes[i][2] - boxes[i][0] > 300 or boxes[i][3] - boxes[i][1] > 300: + valid_score_mask[i] = False + if boxes[i][2] - boxes[i][0] < 125 or boxes[i][3] - boxes[i][1] < 80: + valid_score_mask[i] = False + else: + continue + if valid_score_mask.sum() == 0: return None valid_scores = cls_scores[valid_score_mask] diff --git a/yolox/utils/model_utils.py b/yolox/utils/model_utils.py index 3bc2d1ff7..190f78480 100644 --- a/yolox/utils/model_utils.py +++ b/yolox/utils/model_utils.py @@ -29,6 +29,11 @@ def get_model_info(model: nn.Module, tsize: Sequence[int]) -> str: flops /= 1e9 flops *= tsize[0] * tsize[1] / stride / stride * 2 # Gflops info = "Params: {:.2f}M, Gflops: {:.2f}".format(params, flops) + + # # print stat + # from torchstat import stat + # stat(model, (3, 640, 640)) + return info