
Commit e6774a4

adding dip project
1 parent: f91e027

27 files changed: 297 additions, 0 deletions

.gitignore

Lines changed: 2 additions & 0 deletions
@@ -5,3 +5,5 @@ env/*
 Python/env/*
 JavaScript/code_with_harry(course)/*
 java/AndroidDevelopment/Android/*
+.idea/
+*.h5

dip/demo.ipynb

Lines changed: 218 additions & 0 deletions
@@ -0,0 +1,218 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## part 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pixellib\n",
    "from pixellib.semantic import semantic_segmentation\n",
    "from tensorflow.keras.layers import BatchNormalization\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:6 out of the last 6 calls to <function Model.make_predict_function.<locals>.predict_function at 0x7fb79c373060> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n",
      "1/1 [==============================] - 4s 4s/step\n",
      "Processed Image saved successfully in your current working directory.\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "({'class_ids': array([ 0, 15]),\n",
       " 'masks': array([[False, False, False, ..., False, False, False],\n",
       " [False, False, False, ..., False, False, False],\n",
       " [False, False, False, ..., False, False, False],\n",
       " ...,\n",
       " [False, False, False, ..., False, False, False],\n",
       " [False, False, False, ..., False, False, False],\n",
       " [False, False, False, ..., False, False, False]])},\n",
       " array([[[ 0, 0, 0],\n",
       " [ 0, 0, 0],\n",
       " [ 0, 0, 0],\n",
       " ...,\n",
       " [ 0, 0, 0],\n",
       " [ 0, 0, 0],\n",
       " [ 0, 0, 0]],\n",
       " \n",
       " [[ 0, 0, 0],\n",
       " [ 0, 0, 0],\n",
       " [ 0, 0, 0],\n",
       " ...,\n",
       " [ 0, 0, 0],\n",
       " [ 0, 0, 0],\n",
       " [ 0, 0, 0]],\n",
       " \n",
       " [[ 0, 0, 0],\n",
       " [ 0, 0, 0],\n",
       " [ 0, 0, 0],\n",
       " ...,\n",
       " [ 0, 0, 0],\n",
       " [ 0, 0, 0],\n",
       " [ 0, 0, 0]],\n",
       " \n",
       " ...,\n",
       " \n",
       " [[ 0, 0, 0],\n",
       " [76, 76, 76],\n",
       " [76, 76, 76],\n",
       " ...,\n",
       " [76, 76, 76],\n",
       " [76, 76, 76],\n",
       " [76, 76, 76]],\n",
       " \n",
       " [[ 0, 0, 0],\n",
       " [76, 76, 76],\n",
       " [76, 76, 76],\n",
       " ...,\n",
       " [76, 76, 76],\n",
       " [76, 76, 76],\n",
       " [76, 76, 76]],\n",
       " \n",
       " [[ 0, 0, 0],\n",
       " [76, 76, 76],\n",
       " [76, 76, 76],\n",
       " ...,\n",
       " [76, 76, 76],\n",
       " [76, 76, 76],\n",
       " [76, 76, 76]]], dtype=uint8))"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "segment_image = semantic_segmentation()\n",
    "segment_image.load_pascalvoc_model(\"deeplabv3_xception_tf_dim_ordering_tf_kernels.h5\")\n",
    "segment_image.segmentAsPascalvoc(\"./images/dark1.jpg\", output_image_name = \"./images/newDark1.jpg\",overlay=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "segment_image = semantic_segmentation()\n",
    "segment_image.load_pascalvoc_model(\"deeplabv3_xception_tf_dim_ordering_tf_kernels.h5\")\n",
    "segment_image.segmentAsPascalvoc(\"./images/dark2.jpg\", output_image_name = \"./images/newDark2.jpg\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "segment_image = semantic_segmentation()\n",
    "segment_image.load_pascalvoc_model(\"deeplabv3_xception_tf_dim_ordering_tf_kernels.h5\")\n",
    "segment_image.segmentAsPascalvoc(\"./images/horse.jpg\", output_image_name = \"./images/newHorse.jpg\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "segment_image = semantic_segmentation()\n",
    "segment_image.load_pascalvoc_model(\"deeplabv3_xception_tf_dim_ordering_tf_kernels.h5\")\n",
    "segment_image.segmentAsPascalvoc(\"./images/cycle.jpg\", output_image_name = \"./images/newCycle.jpg\", overlay = True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## part 2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pixellib\n",
    "from pixellib.semantic import semantic_segmentation\n",
    "import cv2\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "segment_image = semantic_segmentation()\n",
    "segment_image.load_pascalvoc_model(\"pascal.h5\")\n",
    "output, segmap = segment_image.segmentAsPascalvoc(\"sample1.jpg\")\n",
    "cv2.imwrite(\".jpg\", output)\n",
    "print(output.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## part 3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pixellib\n",
    "from pixellib.semantic import semantic_segmentation\n",
    "import cv2\n",
    "\n",
    "segment_image = semantic_segmentation()\n",
    "segment_image.load_pascalvoc_model(\"pascal.h5\")\n",
    "output, segmap = segment_image.segmentAsPascalvoc(\"./images/horse.png\")\n",
    "cv2.imwrite(\"./images/dog.jpg\", output)\n",
    "print(output.shape)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.0rc1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
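
All three parts of the notebook exercise the same pixellib workflow: load the PASCAL VOC DeepLabV3+ weights, then call segmentAsPascalvoc on an image. Below is a minimal standalone sketch of that workflow, not the committed code: it assumes the same .h5 weights file sits next to the script, reuses the notebook's ./images/dark1.jpg input, unpacks the return value in the order shown by the execute_result above (segvalues dict first, output image array second), and uses a hypothetical extra output path ./images/dark1_raw.jpg.

import cv2
from pixellib.semantic import semantic_segmentation

# Build the segmenter and load the weights once, then reuse it for every image;
# re-creating it per image, as the notebook cells do, is the likely cause of the
# tf.function retracing warning captured in the cell output above.
segment_image = semantic_segmentation()
segment_image.load_pascalvoc_model("deeplabv3_xception_tf_dim_ordering_tf_kernels.h5")

# segmentAsPascalvoc writes the (optionally overlaid) result to output_image_name
# and returns (segvalues, output): a dict with 'class_ids' and 'masks', plus the
# segmentation map as a uint8 image array.
segvalues, output = segment_image.segmentAsPascalvoc(
    "./images/dark1.jpg",
    output_image_name="./images/newDark1.jpg",
    overlay=True,
)

print(segvalues["class_ids"])                  # e.g. [ 0 15]; 15 is 'person' in the PASCAL VOC class ordering
cv2.imwrite("./images/dark1_raw.jpg", output)  # also save the raw segmentation map (hypothetical path)

Note that part 2 unpacks the return value as output, segmap, which appears reversed relative to the order shown in part 1's execute_result, and passes a bare ".jpg" as the imwrite filename; neither part 2 nor part 3 has recorded output in the diff.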

dip/images/cycle.jpg (84.9 KB)

dip/images/dark1.jpg (16.3 KB)

dip/images/dark2.jpg (267 KB)

dip/images/dog.jpg (17.7 KB)

dip/images/horse.png (75.2 KB)

dip/images/newCycle.jpg (115 KB)

dip/images/newDark1.jpg (25.9 KB)

dip/images/newDark2.jpg (195 KB)
