|
17 | 17 | }, |
18 | 18 | "outputs": [], |
19 | 19 | "source": [ |
20 | | - "import cv2 as cv \n", |
21 | | - "import numpy as np \n", |
| 20 | + "import cv2 as cv\n", |
| 21 | + "import numpy as np\n", |
22 | 22 | "import matplotlib.pyplot as plt\n", |
23 | 23 | "import skimage\n", |
24 | 24 | "\n", |
25 | | - "plt.rcParams['figure.figsize'] = [10, 10] \n", |
26 | | - "plt.rcParams['figure.dpi'] = 80 " |
| 25 | + "plt.rcParams[\"figure.figsize\"] = [10, 10]\n", |
| 26 | + "plt.rcParams[\"figure.dpi\"] = 80" |
27 | 27 | ] |
28 | 28 | }, |
29 | 29 | { |
|
32 | 32 | "source": [ |
33 | 33 | "## Loading an image from a file\n", |
34 | 34 | "\n", |
35 | | - "Fuction for the reading of the image from the file cv.[imread(...)](https://docs.opencv.org/4.4.0/d4/da8/group__imgcodecs.html#ga288b8b3da0892bd651fce07b3bbd3a56) (supported formats are listed in the referenced documentation). To save the image to the file is possible by the function cv.[imwrite(...)](https://docs.opencv.org/master/d4/da8/group__imgcodecs.html#gabbc7ef1aa2edfaa87772f1202d67e0ce)" |
| 35 | + "Function for reading an image from a file is cv.[imread(...)](https://docs.opencv.org/4.4.0/d4/da8/group__imgcodecs.html#ga288b8b3da0892bd651fce07b3bbd3a56) (supported formats are listed in the referenced documentation). Saving an image to a file is possible with the function cv.[imwrite(...)](https://docs.opencv.org/master/d4/da8/group__imgcodecs.html#gabbc7ef1aa2edfaa87772f1202d67e0ce)." |
36 | 36 | ] |
37 | 37 | }, |
38 | 38 | { |
|
41 | 41 | "metadata": {}, |
42 | 42 | "outputs": [], |
43 | 43 | "source": [ |
44 | | - "#LOAD IMAGE FROM REMOTE SOURCE\n", |
45 | | - "img = skimage.io.imread('https://github.com/CVUT-FS-12110/Machine-Perception-and-Image-Analysis/blob/master/src/lectures/01_openCV/data/2_foto.jpg?raw=true')\n", |
| 44 | + "# LOAD IMAGE FROM REMOTE SOURCE\n", |
| 45 | + "img = skimage.io.imread(\"https://github.com/CVUT-FS-12110/Machine-Perception-and-Image-Analysis/blob/master/src/lectures/01_openCV/data/2_foto.jpg?raw=true\")\n", |
46 | 46 | "img = cv.cvtColor(img, cv.COLOR_RGB2BGR)\n", |
47 | 47 | "\n", |
48 | | - "#LOAD IMAGE FROM LOCAL DRIVE\n", |
49 | | - "#img = cv.imread('./data/2_foto.jpg')" |
| 48 | + "# LOAD IMAGE FROM LOCAL DRIVE\n", |
| 49 | + "# img = cv.imread(\"./data/2_foto.jpg\")" |
50 | 50 | ] |
51 | 51 | }, |
52 | 52 | { |
|
71 | 71 | "metadata": {}, |
72 | 72 | "outputs": [], |
73 | 73 | "source": [ |
74 | | - "print('Dimensions: {}'.format(img.ndim))\n", |
75 | | - "print('Shape: {}'.format(img.shape))\n", |
76 | | - "print('Byte size: {} B'.format(img.size))\n", |
77 | | - "print('Data type: {}'.format(img.dtype))" |
| 74 | + "print(\"Dimensions: {}\".format(img.ndim))\n", |
| 75 | + "print(\"Shape: {}\".format(img.shape))\n", |
| 76 | + "print(\"Byte size: {} B\".format(img.size))\n", |
| 77 | + "print(\"Data type: {}\".format(img.dtype))" |
78 | 78 | ] |
79 | 79 | }, |
80 | 80 | { |
|
111 | 111 | "outputs": [], |
112 | 112 | "source": [ |
113 | 113 | "def bgr2rgb(bgr_image):\n", |
114 | | - " b,g,r = cv.split(bgr_image) \n", |
115 | | - " return cv.merge([r,g,b]) " |
| 114 | + " b, g, r = cv.split(bgr_image)\n", |
| 115 | + " return cv.merge([r, g, b])" |
116 | 116 | ] |
117 | 117 | }, |
118 | 118 | { |
|
128 | 128 | "cell_type": "markdown", |
129 | 129 | "metadata": {}, |
130 | 130 | "source": [ |
131 | | - "After switching the channels, the picture is displayed correctly. Individual color components can be rendered using the same function, but using only one color channel. To display the channel in the corresponding color, you need to pass the *cmap* argument to the *plt.imread(...)* function ([colormap](https://matplotlib.org/3.3.2/tutorials/colors/colormaps.html)) .\n" |
| 131 | + "After switching the channels, the picture is displayed correctly. Individual color components can be rendered using the same function, but using only one color channel. To display the channel in the corresponding color, you need to pass the *cmap* argument to the *plt.imread(...)* function ([colormap](https://matplotlib.org/3.3.2/tutorials/colors/colormaps.html))." |
132 | 132 | ] |
133 | 133 | }, |
134 | 134 | { |
|
138 | 138 | "outputs": [], |
139 | 139 | "source": [ |
140 | 140 | "plt.subplot(311)\n", |
141 | | - "plt.imshow(img[:,:,0], cmap='Blues')\n", |
| 141 | + "plt.imshow(img[:, :, 0], cmap=\"Blues\")\n", |
142 | 142 | "plt.subplot(312)\n", |
143 | | - "plt.imshow(img[:,:,1], cmap='Greens')\n", |
| 143 | + "plt.imshow(img[:, :, 1], cmap=\"Greens\")\n", |
144 | 144 | "plt.subplot(313)\n", |
145 | | - "plt.imshow(img[:,:,2], cmap='Reds')" |
| 145 | + "plt.imshow(img[:, :, 2], cmap=\"Reds\")" |
146 | 146 | ] |
147 | 147 | }, |
148 | 148 | { |
|
152 | 152 | "## Conversions between color space formats\n", |
153 | 153 | "The function cv.[cvtColor(...)](https://docs.opencv.org/3.4/d8/d01/group__imgproc__color__conversions.html#ga397ae87e1288a81d2363b61574eb8cab) is used to convert between different image color space formats, whose required arguments are the image that is to be converted and [conversion code](https://docs.opencv.org/3.4/d8/d01/group__imgproc__color__conversions.html#ga4e0972be5de079fed4e3a10e24ef5ef0).\n", |
154 | 154 | "\n", |
155 | | - "For example, the code cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) is for the convertion of the *BGR* image to grayscale. The conversion algorithm is documented [here](https://docs.opencv.org/3.4/de/d25/imgproc_color_conversions.html)" |
| 155 | + "For example, the code cv.cvtColor(img, cv.COLOR_BGR2GRAY) converts a *BGR* image to grayscale. The conversion algorithm is documented [here](https://docs.opencv.org/3.4/de/d25/imgproc_color_conversions.html)." |
156 | 156 | ] |
157 | 157 | }, |
158 | 158 | { |
|
170 | 170 | "metadata": {}, |
171 | 171 | "outputs": [], |
172 | 172 | "source": [ |
173 | | - "plt.imshow(gray[:,:], cmap='gray') " |
| 173 | + "plt.imshow(gray[:, :], cmap=\"gray\")" |
174 | 174 | ] |
175 | 175 | }, |
176 | 176 | { |
|
192 | 192 | "source": [ |
193 | 193 | "hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)\n", |
194 | 194 | "plt.subplot(311)\n", |
195 | | - "plt.imshow(hsv[:,:,0], cmap='gray')\n", |
196 | | - "plt.title('HUE')\n", |
| 195 | + "plt.imshow(hsv[:, :, 0], cmap=\"gray\")\n", |
| 196 | + "plt.title(\"HUE\")\n", |
197 | 197 | "plt.subplot(312)\n", |
198 | | - "plt.imshow(hsv[:,:,1], cmap='gray')\n", |
199 | | - "plt.title('SATURATION')\n", |
| 198 | + "plt.imshow(hsv[:, :, 1], cmap=\"gray\")\n", |
| 199 | + "plt.title(\"SATURATION\")\n", |
200 | 200 | "plt.subplot(313)\n", |
201 | | - "plt.imshow(hsv[:,:,2], cmap='gray')\n", |
202 | | - "plt.title('VALUE');" |
| 201 | + "plt.imshow(hsv[:, :, 2], cmap=\"gray\")\n", |
| 202 | + "plt.title(\"VALUE\")" |
203 | 203 | ] |
204 | 204 | }, |
205 | 205 | { |
|
218 | 218 | "source": [ |
219 | 219 | "hls = cv.cvtColor(img, cv.COLOR_BGR2HLS)\n", |
220 | 220 | "plt.subplot(311)\n", |
221 | | - "plt.imshow(hls[:,:,0], cmap='gray')\n", |
222 | | - "plt.title('HUE')\n", |
| 221 | + "plt.imshow(hls[:, :, 0], cmap=\"gray\")\n", |
| 222 | + "plt.title(\"HUE\")\n", |
223 | 223 | "plt.subplot(312)\n", |
224 | | - "plt.imshow(hls[:,:,1], cmap='gray')\n", |
225 | | - "plt.title('LIGHTNESS')\n", |
| 224 | + "plt.imshow(hls[:, :, 1], cmap=\"gray\")\n", |
| 225 | + "plt.title(\"LIGHTNESS\")\n", |
226 | 226 | "plt.subplot(313)\n", |
227 | | - "plt.imshow(hls[:,:,2], cmap='gray')\n", |
228 | | - "plt.title('SATURATION');" |
| 227 | + "plt.imshow(hls[:, :, 2], cmap=\"gray\")\n", |
| 228 | + "plt.title(\"SATURATION\")" |
229 | 229 | ] |
230 | 230 | }, |
231 | 231 | { |
|
244 | 244 | "source": [ |
245 | 245 | "lab = cv.cvtColor(img, cv.COLOR_BGR2LAB)\n", |
246 | 246 | "plt.subplot(311)\n", |
247 | | - "plt.imshow(lab[:,:,0], cmap='gray')\n", |
248 | | - "plt.title('LIGHNTESS')\n", |
| 247 | + "plt.imshow(lab[:, :, 0], cmap=\"gray\")\n", |
| 248 | + "plt.title(\"LIGHTNESS\")\n", |
249 | 249 | "plt.subplot(312)\n", |
250 | | - "plt.imshow(lab[:,:,1], cmap='RdYlGn_r')\n", |
251 | | - "plt.title('osa zelená-červená')\n", |
| 250 | + "plt.imshow(lab[:, :, 1], cmap=\"RdYlGn_r\")\n", |
| 251 | + "plt.title(\"GREEN-RED AXIS\")\n", |
252 | 252 | "plt.subplot(313)\n", |
253 | | - "plt.imshow(lab[:,:,2], cmap='YlGnBu_r')\n", |
254 | | - "plt.title('osa modrá-žlutá');" |
| 253 | + "plt.imshow(lab[:, :, 2], cmap=\"YlGnBu_r\")\n", |
| 254 | + "plt.title(\"BLUE-YELLOW AXIS\")" |
255 | 255 | ] |
256 | 256 | }, |
257 | 257 | { |
|
269 | 269 | "source": [ |
270 | 270 | "luv = cv.cvtColor(img, cv.COLOR_BGR2LUV)\n", |
271 | 271 | "plt.subplot(311)\n", |
272 | | - "plt.imshow(luv[:,:,0], cmap='gray')\n", |
273 | | - "plt.title('LIGHNTESS')\n", |
| 272 | + "plt.imshow(luv[:, :, 0], cmap=\"gray\")\n", |
| 273 | + "plt.title(\"LIGHTNESS\")\n", |
274 | 274 | "plt.subplot(312)\n", |
275 | | - "plt.imshow(luv[:,:,1], cmap='gray')\n", |
276 | | - "plt.title('osa u')\n", |
| 275 | + "plt.imshow(luv[:, :, 1], cmap=\"gray\")\n", |
| 276 | + "plt.title(\"U AXIS\")\n", |
277 | 277 | "plt.subplot(313)\n", |
278 | | - "plt.imshow(luv[:,:,2], cmap='gray')\n", |
279 | | - "plt.title('osa v');" |
| 278 | + "plt.imshow(luv[:, :, 2], cmap=\"gray\")\n", |
| 279 | + "plt.title(\"V AXIS\")" |
280 | 280 | ] |
281 | 281 | }, |
282 | 282 | { |
|
301 | 301 | "metadata": {}, |
302 | 302 | "outputs": [], |
303 | 303 | "source": [ |
304 | | - "color = ('b','g','r') # tuple kódů barev pro matplotlib\n", |
| 304 | + "color = (\"b\", \"g\", \"r\") # tuple of channel names for matplotlib\n", |
305 | 305 | "\n", |
306 | | - "for i, col in enumerate(color): \n", |
307 | | - " histr = cv.calcHist([img], [i], None, [256], [0,256])\n", |
308 | | - " \n", |
309 | | - " plt.plot(histr, color = col) \n", |
310 | | - " \n", |
311 | | - "plt.hist(img.ravel(), 256, [0,256]) \n", |
312 | | - "gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) \n", |
313 | | - "histr, _ = np.histogram(gray.ravel(), 256, [0,256])\n", |
314 | | - "plt.plot(histr, 'k')\n", |
315 | | - "plt.xlim([0,256]);" |
| 306 | + "for i, col in enumerate(color):\n", |
| 307 | + " histr = cv.calcHist([img], [i], None, [256], [0, 256])\n", |
| 308 | + " plt.plot(histr, color=col)\n", |
| 309 | + "\n", |
| 310 | + "plt.hist(img.ravel(), 256, (0, 256))\n", |
| 311 | + "gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n", |
| 312 | + "histr, _ = np.histogram(gray.ravel(), 256, (0, 256))\n", |
| 313 | + "plt.plot(histr, \"k\")\n", |
| 314 | + "plt.xlim([0, 256])" |
316 | 315 | ] |
317 | 316 | }, |
318 | 317 | { |
319 | 318 | "cell_type": "markdown", |
320 | 319 | "metadata": {}, |
321 | 320 | "source": [ |
322 | | - "From the histogram we can calculate the **CDF** (Cumulative Distribution Function)" |
| 321 | + "From the histogram we can calculate the **CDF** (Cumulative Distribution Function):" |
323 | 322 | ] |
324 | 323 | }, |
325 | 324 | { |
|
328 | 327 | "metadata": {}, |
329 | 328 | "outputs": [], |
330 | 329 | "source": [ |
331 | | - "color = ('b','g','r')\n", |
| 330 | + "color = (\"b\", \"g\", \"r\") # tuple of channel names for matplotlib\n", |
332 | 331 | "\n", |
333 | 332 | "for i, col in enumerate(color):\n", |
334 | | - " histr = cv.calcHist([img], [i], None, [256], [0,256])\n", |
335 | | - " \n", |
336 | | - " plt.plot(np.cumsum(histr), color = col) \n", |
337 | | - "gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) \n", |
338 | | - "histr, _ = np.histogram(gray.ravel(), 256, [0,256]) \n", |
339 | | - "plt.plot(np.cumsum(histr), 'k') \n", |
340 | | - "plt.xlim([0,256]); " |
| 333 | + " histr = cv.calcHist([img], [i], None, [256], [0, 256])\n", |
| 334 | + " plt.plot(np.cumsum(histr), color=col)\n", |
| 335 | + "\n", |
| 336 | + "gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n", |
| 337 | + "histr, _ = np.histogram(gray.ravel(), 256, (0, 256))\n", |
| 338 | + "plt.plot(np.cumsum(histr), \"k\")\n", |
| 339 | + "plt.xlim([0, 256])" |
341 | 340 | ] |
342 | 341 | }, |
343 | 342 | { |
|
348 | 347 | "\n", |
349 | 348 | "### Resize [cv.resize(...)](https://docs.opencv.org/4.4.0/da/d54/group__imgproc__transform.html#ga47a974309e9102f5f08231edc7e7529d)\n", |
350 | 349 | "\n", |
351 | | - "The interpolation function used by the resize algorithm is given by flag from [list](https://docs.opencv.org/4.4.0/da/d54/group__imgproc__transform.html#ga5bb5a1fea74ea38e1a5445ca803ff121)" |
| 350 | + "The interpolation function used by the resize algorithm is given by flag from [list](https://docs.opencv.org/4.4.0/da/d54/group__imgproc__transform.html#ga5bb5a1fea74ea38e1a5445ca803ff121)." |
352 | 351 | ] |
353 | 352 | }, |
354 | 353 | { |
|
357 | 356 | "metadata": {}, |
358 | 357 | "outputs": [], |
359 | 358 | "source": [ |
360 | | - "img_half = cv.resize(img, (int(img.shape[1]/6), int(img.shape[0]/6)), interpolation = cv.INTER_LINEAR)\n", |
| 359 | + "img_half = cv.resize(img, (int(img.shape[1] / 6), int(img.shape[0] / 6)), interpolation=cv.INTER_LINEAR)\n", |
| 360 | + "\n", |
361 | 361 | "plt.imshow(bgr2rgb(img_half))" |
362 | 362 | ] |
363 | 363 | }, |
|
367 | 367 | "source": [ |
368 | 368 | "### Move\n", |
369 | 369 | "\n", |
370 | | - "To move the image, we need to define a translation matrix and then apply the affine transformation defined by this matrix with a function cv.[warpAffine(...)](https://docs.opencv.org/4.4.0/da/d54/group__imgproc__transform.html#ga0203d9ee5fcd28d40dbc4a1ea4451983)" |
| 370 | + "To move the image, we need to define a translation matrix and then apply the affine transformation defined by this matrix with a function cv.[warpAffine(...)](https://docs.opencv.org/4.4.0/da/d54/group__imgproc__transform.html#ga0203d9ee5fcd28d40dbc4a1ea4451983)." |
371 | 371 | ] |
372 | 372 | }, |
373 | 373 | { |
|
376 | 376 | "metadata": {}, |
377 | 377 | "outputs": [], |
378 | 378 | "source": [ |
| 379 | + "(h, w) = img.shape[:2]\n", |
379 | 380 | "\n", |
380 | | - "(h, w) = img.shape[:2] \n", |
381 | | - "\n", |
382 | | - "M = np.float32([[1, 0, 100], \n", |
383 | | - " [0, 1, 50]])\n", |
384 | | - "img_tr = cv.warpAffine(img, M, (w,h))\n", |
| 381 | + "M = np.float32([[1, 0, 100], [0, 1, 50]])\n", |
| 382 | + "img_tr = cv.warpAffine(img, M, (w, h))\n", |
385 | 383 | "\n", |
386 | 384 | "plt.imshow(bgr2rgb(img_tr))" |
387 | 385 | ] |
|
392 | 390 | "source": [ |
393 | 391 | "### Rotation \n", |
394 | 392 | "\n", |
395 | | - "For rotation, we need the transformation matrix of the affine mapping that could be calculated using the function cv.[getRotationMatrix(...)](https://docs.opencv.org/4.4.0/da/d54/group__imgproc__transform.html#gafbbc470ce83812914a70abfb604f4326) and then apply the affine transformation with a function cv.[warpAffine(...)](https://docs.opencv.org/4.4.0/da/d54/group__imgproc__transform.html#ga0203d9ee5fcd28d40dbc4a1ea4451983)" |
| 393 | + "For rotation, we need the transformation matrix of the affine mapping that could be calculated using the function cv.[getRotationMatrix(...)](https://docs.opencv.org/4.4.0/da/d54/group__imgproc__transform.html#gafbbc470ce83812914a70abfb604f4326) and then apply the affine transformation with a function cv.[warpAffine(...)](https://docs.opencv.org/4.4.0/da/d54/group__imgproc__transform.html#ga0203d9ee5fcd28d40dbc4a1ea4451983)." |
396 | 394 | ] |
397 | 395 | }, |
398 | 396 | { |
|
401 | 399 | "metadata": {}, |
402 | 400 | "outputs": [], |
403 | 401 | "source": [ |
404 | | - "\n", |
405 | | - "(h, w) = img.shape[:2] \n", |
406 | | - "(cX, cY) = (w // 2, h // 2) # center of rotation\n", |
407 | | - "M = cv.getRotationMatrix2D((cX, cY), -45, 1.0) # center of operation, rotation, scale\n", |
| 402 | + "(h, w) = img.shape[:2]\n", |
| 403 | + "(cX, cY) = (w // 2, h // 2) # center of rotation\n", |
| 404 | + "M = cv.getRotationMatrix2D((cX, cY), -45, 1.0) # center of operation, rotation, scale\n", |
408 | 405 | "img_rt = cv.warpAffine(img, M, (w, h))\n", |
409 | 406 | "plt.imshow(bgr2rgb(img_rt))" |
410 | 407 | ] |
|
413 | 410 | "cell_type": "markdown", |
414 | 411 | "metadata": {}, |
415 | 412 | "source": [ |
416 | | - "The result is a rotated image, but with the loss of the edges that got outside the original image by rotation. If we want to see the whole picture, we have to calculate the new size of the canvas, the new center of the picture, and modify the transformation matrix with it (add move to rotation)" |
| 413 | + "The result is a rotated image, but with the loss of the edges that got outside the original image by rotation. If we want to see the whole picture, we have to calculate the new size of the canvas, the new center of the picture, and modify the transformation matrix with it (add move to rotation)." |
417 | 414 | ] |
418 | 415 | }, |
419 | 416 | { |
|
422 | 419 | "metadata": {}, |
423 | 420 | "outputs": [], |
424 | 421 | "source": [ |
425 | | - "(h, w) = img.shape[:2] \n", |
| 422 | + "(h, w) = img.shape[:2]\n", |
426 | 423 | "(cX, cY) = (w // 2, h // 2)\n", |
427 | 424 | "M = cv.getRotationMatrix2D((cX, cY), -45, 1.0)\n", |
428 | 425 | "cos = np.abs(M[0, 0])\n", |
|
455 | 452 | "metadata": {}, |
456 | 453 | "outputs": [], |
457 | 454 | "source": [ |
458 | | - "from skimage.measure import profile_line " |
| 455 | + "from skimage.measure import profile_line" |
459 | 456 | ] |
460 | 457 | }, |
461 | 458 | { |
|
464 | 461 | "metadata": {}, |
465 | 462 | "outputs": [], |
466 | 463 | "source": [ |
467 | | - "gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) \n", |
| 464 | + "gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n", |
468 | 465 | "print(gray.shape)" |
469 | 466 | ] |
470 | 467 | }, |
|
474 | 471 | "metadata": {}, |
475 | 472 | "outputs": [], |
476 | 473 | "source": [ |
477 | | - "p = profile_line(gray, [200, 0], [200, 920]) " |
| 474 | + "p = profile_line(gray, [200, 0], [200, 920])" |
478 | 475 | ] |
479 | 476 | }, |
480 | 477 | { |
|
483 | 480 | "metadata": {}, |
484 | 481 | "outputs": [], |
485 | 482 | "source": [ |
486 | | - "plt.plot(p); " |
| 483 | + "plt.plot(p)" |
487 | 484 | ] |
488 | | - }, |
489 | | - { |
490 | | - "cell_type": "code", |
491 | | - "execution_count": null, |
492 | | - "metadata": {}, |
493 | | - "outputs": [], |
494 | | - "source": [] |
495 | 485 | } |
496 | 486 | ], |
497 | 487 | "metadata": { |
|
0 commit comments