import cv2
import argparse
import sys
+import numpy as np

#####################################################################

+# concatenate two RGB/grayscale images horizontally (left to right)
+# handling differing channel numbers or image heights in the input
+
+
+def h_concat(img1, img2):
+
+    # get size and channels for both images
+
+    height1 = img1.shape[0]
+    # width1 = img1.shape[1]
+    if (len(img1.shape) == 2):
+        channels1 = 1
+    else:
+        channels1 = img1.shape[2]
+
+    height2 = img2.shape[0]
+    width2 = img2.shape[1]
+    if (len(img2.shape) == 2):
+        channels2 = 1
+    else:
+        channels2 = img2.shape[2]
+
+    # make all images 3 channel, or assume all same channel
+
+    if ((channels1 > channels2) and (channels1 == 3)):
+        out2 = cv2.cvtColor(img2, cv2.COLOR_GRAY2BGR)
+        out1 = img1
+    elif ((channels2 > channels1) and (channels2 == 3)):
+        out1 = cv2.cvtColor(img1, cv2.COLOR_GRAY2BGR)
+        out2 = img2
+    else:  # both must be equal
+        out1 = img1
+        out2 = img2
+
+    # height of first image is master height, width remains unchanged
+
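+    # note: cv2.resize() takes its dsize argument as (width, height), so
+    # the call below passes (width2, height1) to match the first image's height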
+    if (height1 != height2):
+        out2 = cv2.resize(out2, (width2, height1))
+
+    return np.hstack((out1, out2))
+
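+# usage sketch (hypothetical file names, shown purely as an example):
+#   both = h_concat(cv2.imread("left.png"),
+#                   cv2.imread("right.png", cv2.IMREAD_GRAYSCALE))
+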
+#####################################################################
+
+# concatenate two RGB/grayscale images vertically (top to bottom)
+# handling differing channel numbers or image widths in the input
+
+
+def v_concat(img1, img2):
+
+    # get size and channels for both images
+
+    # height1 = img1.shape[0]
+    width1 = img1.shape[1]
+    if (len(img1.shape) == 2):
+        channels1 = 1
+    else:
+        channels1 = img1.shape[2]
+
+    height2 = img2.shape[0]
+    width2 = img2.shape[1]
+    if (len(img2.shape) == 2):
+        channels2 = 1
+    else:
+        channels2 = img2.shape[2]
+
+    # make all images 3 channel, or assume all same channel
+
+    if ((channels1 > channels2) and (channels1 == 3)):
+        out2 = cv2.cvtColor(img2, cv2.COLOR_GRAY2BGR)
+        out1 = img1
+    elif ((channels2 > channels1) and (channels2 == 3)):
+        out1 = cv2.cvtColor(img1, cv2.COLOR_GRAY2BGR)
+        out2 = img2
+    else:  # both must be equal
+        out1 = img1
+        out2 = img2
+
+    # width of first image is master width, height remains unchanged
+
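+    # note: cv2.resize() takes its dsize argument as (width, height), so
+    # the call below passes (width1, height2) to match the first image's width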
+    if (width1 != width2):
+        out2 = cv2.resize(out2, (width1, height2))
+
+    return np.vstack((out1, out2))
+
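+# usage sketch (hypothetical inputs): v_concat(frame, mask) stacks a colour
+# frame above a grayscale mask, converting the mask to BGR and resizing it
+# to the frame width before np.vstack()
+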
+#####################################################################
+
+
keep_processing = True

# parse command line arguments for camera ID or video file

    type=int,
    nargs=2,
    help='override default camera resolution as H W')
+parser.add_argument(
+    "-fs",
+    "--fullscreen",
+    action='store_true',
+    help="run in full screen mode")
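+# (stored as args.fullscreen once parsed; it selects the single tiled
+# full screen display in the main loop and can be toggled with the 'f' key)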
parser.add_argument(
    'video_file',
    metavar='video_file',

        fgdilated = cv2.dilate(
            fgthres, kernel=cv2.getStructuringElement(
                cv2.MORPH_ELLIPSE, (3, 3)), iterations=3)
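+        # erode the dilated mask with the same elliptical kernel - dilation
+        # followed by erosion approximates a morphological closing, filling
+        # small holes in the foreground regions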
+        fgderoded = cv2.erode(
+            fgdilated, kernel=cv2.getStructuringElement(
+                cv2.MORPH_ELLIPSE, (3, 3)), iterations=3)

        # get current background image (representative of current GMM model)

        bgmodel = mog.getBackgroundImage()

        # display images - input, background and original

-        cv2.imshow(window_name, frame)
-        cv2.imshow(window_nameFG, fgdilated)
-        cv2.imshow(window_nameFGP, fgmask)
-        cv2.imshow(window_nameBG, bgmodel)
+        if (args.fullscreen):
+
+            # use a separate window name for the combined full screen view
+            window_name_full = "[ Live | BG | Pr(FG) | FG ]"
+            cv2.namedWindow(window_name_full, cv2.WINDOW_NORMAL)
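+            # tile the four views into a 2x2 mosaic matching the window
+            # title: top row [ Live | BG ], bottom row [ Pr(FG) | FG ]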
+            cv2.imshow(window_name_full, v_concat(
+                h_concat(frame, bgmodel),
+                h_concat(fgmask, fgderoded)
+            ))
+            cv2.setWindowProperty(window_name_full, cv2.WND_PROP_FULLSCREEN,
+                                  cv2.WINDOW_FULLSCREEN & args.fullscreen)
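+            # (cv2.WINDOW_FULLSCREEN & args.fullscreen is simply
+            # cv2.WINDOW_FULLSCREEN here, since args.fullscreen is
+            # True inside this branch)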
+
+        else:
+
+            cv2.imshow(window_name, frame)
+            cv2.imshow(window_nameFG, fgderoded)
+            cv2.imshow(window_nameFGP, fgmask)
+            cv2.imshow(window_nameBG, bgmodel)

        # start the event loop - essential

            print("\nResetting MoG background model ...\n")
            mog = cv2.createBackgroundSubtractorMOG2(
                history=2000, varThreshold=16, detectShadows=True)
+        elif (key == ord('f')):
+            args.fullscreen = not (args.fullscreen)
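+            # (the toggled mode takes effect when the next frame is displayed)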

    # close all windows
