|
57 | 57 | "metadata": {},
|
58 | 58 | "outputs": [],
|
59 | 59 | "source": [
|
60 |
| - "# instantiate the models\n", |
| 60 | + "# init the models\n", |
61 | 61 | "vgg_model = vgg16.VGG16(weights='imagenet')\n",
|
62 | 62 | "\n",
|
63 | 63 | "inception_model = inception_v3.InceptionV3(weights='imagenet')\n",
|
|
71 | 71 | "cell_type": "markdown",
|
72 | 72 | "metadata": {},
|
73 | 73 | "source": [
|
74 |
| - "Let's define the image for further experiments assigning its location to the ``filename`` variable. You can choose any other image example to test the models." |
| 74 | + "Let's define the image path for further experiments in the ``filename`` variable. Please note that there are other images in the ``images`` directory that you can use as well to test the models." |
75 | 75 | ]
|
76 | 76 | },
|
77 | 77 | {
|
|
144 | 144 | {
|
145 | 145 | "data": {
|
146 | 146 | "text/plain": [
|
147 |
| - "<matplotlib.image.AxesImage at 0x7f23000a2510>" |
| 147 | + "<matplotlib.image.AxesImage at 0x7f3270126c50>" |
148 | 148 | ]
|
149 | 149 | },
|
150 | 150 | "execution_count": 5,
|
|
212 | 212 | "metadata": {},
|
213 | 213 | "outputs": [
|
214 | 214 | {
|
215 |
| - "data": { |
216 |
| - "text/plain": [ |
217 |
| - "[[('n02123597', 'Siamese_cat', 0.30934104),\n", |
218 |
| - " ('n01877812', 'wallaby', 0.08034121),\n", |
219 |
| - " ('n02326432', 'hare', 0.07509851),\n", |
220 |
| - " ('n02325366', 'wood_rabbit', 0.05053079),\n", |
221 |
| - " ('n03223299', 'doormat', 0.048173636)]]" |
222 |
| - ] |
223 |
| - }, |
224 |
| - "execution_count": 6, |
225 |
| - "metadata": {}, |
226 |
| - "output_type": "execute_result" |
| 215 | + "name": "stdout", |
| 216 | + "output_type": "stream", |
| 217 | + "text": [ |
| 218 | + "('n02123597', 'Siamese_cat', 0.30934194)\n", |
| 219 | + "('n01877812', 'wallaby', 0.08034124)\n", |
| 220 | + "('n02326432', 'hare', 0.07509843)\n", |
| 221 | + "('n02325366', 'wood_rabbit', 0.0505307)\n", |
| 222 | + "('n03223299', 'doormat', 0.048173614)\n" |
| 223 | + ] |
227 | 224 | }
|
228 | 225 | ],
|
229 | 226 | "source": [
|
|
234 | 231 | "predictions = vgg_model.predict(processed_image)\n",
|
235 | 232 | "# print predictions\n",
|
236 | 233 | "# convert the probabilities to class labels\n",
|
237 |
| - "# We will get top 5 predictions which is the default\n", |
| 234 | + "# we will get top 5 predictions which is the default\n", |
238 | 235 | "label_vgg = decode_predictions(predictions)\n",
|
239 |
| - "label_vgg" |
| 236 | + "# print VGG16 predictions\n", |
| 237 | + "for prediction_id in range(len(label_vgg[0])):\n", |
| 238 | + " print(label_vgg[0][prediction_id])" |
240 | 239 | ]
|
241 | 240 | },
|
242 | 241 | {
|
|
252 | 251 | "metadata": {},
|
253 | 252 | "outputs": [
|
254 | 253 | {
|
255 |
| - "data": { |
256 |
| - "text/plain": [ |
257 |
| - "[[('n02124075', 'Egyptian_cat', 0.15737584),\n", |
258 |
| - " ('n03958227', 'plastic_bag', 0.14362834),\n", |
259 |
| - " ('n03223299', 'doormat', 0.14099452)]]" |
260 |
| - ] |
261 |
| - }, |
262 |
| - "execution_count": 7, |
263 |
| - "metadata": {}, |
264 |
| - "output_type": "execute_result" |
| 254 | + "name": "stdout", |
| 255 | + "output_type": "stream", |
| 256 | + "text": [ |
| 257 | + "('n02124075', 'Egyptian_cat', 0.15737602)\n", |
| 258 | + "('n03958227', 'plastic_bag', 0.14362879)\n", |
| 259 | + "('n03223299', 'doormat', 0.14099468)\n" |
| 260 | + ] |
265 | 261 | }
|
266 | 262 | ],
|
267 | 263 | "source": [
|
|
274 | 270 | "# convert the probabilities to class labels\n",
|
275 | 271 | "# If you want to see the top 3 predictions, specify it using the top argument\n",
|
276 | 272 | "label_resnet = decode_predictions(predictions, top=3)\n",
|
277 |
| - "label_resnet" |
| 273 | + "# print ResNet predictions\n", |
| 274 | + "for prediction_id in range(len(label_resnet[0])):\n", |
| 275 | + " print(label_resnet[0][prediction_id])" |
278 | 276 | ]
|
279 | 277 | },
|
280 | 278 | {
|
|
290 | 288 | "metadata": {},
|
291 | 289 | "outputs": [
|
292 | 290 | {
|
293 |
| - "data": { |
294 |
| - "text/plain": [ |
295 |
| - "[[('n03958227', 'plastic_bag', 0.15703735),\n", |
296 |
| - " ('n02124075', 'Egyptian_cat', 0.11697447),\n", |
297 |
| - " ('n02123597', 'Siamese_cat', 0.10532668),\n", |
298 |
| - " ('n02123045', 'tabby', 0.075648665),\n", |
299 |
| - " ('n02909870', 'bucket', 0.054680943)]]" |
300 |
| - ] |
301 |
| - }, |
302 |
| - "execution_count": 8, |
303 |
| - "metadata": {}, |
304 |
| - "output_type": "execute_result" |
| 291 | + "name": "stdout", |
| 292 | + "output_type": "stream", |
| 293 | + "text": [ |
| 294 | + "('n03958227', 'plastic_bag', 0.1570367)\n", |
| 295 | + "('n02124075', 'Egyptian_cat', 0.11697376)\n", |
| 296 | + "('n02123597', 'Siamese_cat', 0.10532685)\n", |
| 297 | + "('n02123045', 'tabby', 0.0756485)\n", |
| 298 | + "('n02909870', 'bucket', 0.054681662)\n" |
| 299 | + ] |
305 | 300 | }
|
306 | 301 | ],
|
307 | 302 | "source": [
|
|
313 | 308 | "\n",
|
314 | 309 | "# convert the probabilities to imagenet class labels\n",
|
315 | 310 | "label_mobilenet = decode_predictions(predictions)\n",
|
316 |
| - "label_mobilenet" |
| 311 | + "# print MobileNet predictions\n", |
| 312 | + "for prediction_id in range(len(label_mobilenet[0])):\n", |
| 313 | + " print(label_mobilenet[0][prediction_id])" |
317 | 314 | ]
|
318 | 315 | },
|
319 | 316 | {
|
|
332 | 329 | "metadata": {},
|
333 | 330 | "outputs": [
|
334 | 331 | {
|
335 |
| - "data": { |
336 |
| - "text/plain": [ |
337 |
| - "[[('n02124075', 'Egyptian_cat', 0.66225773),\n", |
338 |
| - " ('n02123045', 'tabby', 0.050285283),\n", |
339 |
| - " ('n02123597', 'Siamese_cat', 0.03638168),\n", |
340 |
| - " ('n02123159', 'tiger_cat', 0.023522485),\n", |
341 |
| - " ('n03223299', 'doormat', 0.0152056785)]]" |
342 |
| - ] |
343 |
| - }, |
344 |
| - "execution_count": 9, |
345 |
| - "metadata": {}, |
346 |
| - "output_type": "execute_result" |
| 332 | + "name": "stdout", |
| 333 | + "output_type": "stream", |
| 334 | + "text": [ |
| 335 | + "('n02124075', 'Egyptian_cat', 0.66225827)\n", |
| 336 | + "('n02123045', 'tabby', 0.05028525)\n", |
| 337 | + "('n02123597', 'Siamese_cat', 0.03638152)\n", |
| 338 | + "('n02123159', 'tiger_cat', 0.023522492)\n", |
| 339 | + "('n03223299', 'doormat', 0.015205677)\n" |
| 340 | + ] |
347 | 341 | }
|
348 | 342 | ],
|
349 | 343 | "source": [
|
|
364 | 358 | "\n",
|
365 | 359 | "# convert the probabilities to class labels\n",
|
366 | 360 | "label_inception = decode_predictions(predictions)\n",
|
367 |
| - "label_inception" |
| 361 | + "# print Inception predictions\n", |
| 362 | + "for prediction_id in range(len(label_inception[0])):\n", |
| 363 | + " print(label_inception[0][prediction_id])" |
368 | 364 | ]
|
369 | 365 | },
|
370 | 366 | {
|
|
404 | 400 | {
|
405 | 401 | "data": {
|
406 | 402 | "text/plain": [
|
407 |
| - "(-0.5, 699.5, 699.5, -0.5)" |
| 403 | + "<matplotlib.image.AxesImage at 0x7f32381f4a10>" |
408 | 404 | ]
|
409 | 405 | },
|
410 | 406 | "execution_count": 11,
|
|
426 | 422 | ],
|
427 | 423 | "source": [
|
428 | 424 | "plt.figure(figsize=[10,10])\n",
|
429 |
| - "plt.imshow(numpy_image)\n", |
430 |
| - "plt.axis('off')" |
| 425 | + "plt.axis('off')\n", |
| 426 | + "plt.imshow(numpy_image)" |
431 | 427 | ]
|
432 | 428 | }
|
433 | 429 | ],
|
|
0 commit comments