
Commit 80acfe5

Merge pull request #933 from Jaykumaran/master
cvpr2024-part-2
2 parents f3e2e58 + 0ebfd38 commit 80acfe5

61 files changed: +795 additions, -0 deletions


README.md

Lines changed: 1 addition & 0 deletions
@@ -15,6 +15,7 @@ Want to become an expert in AI? [AI Courses by OpenCV](https://opencv.org/course
 
 | Blog Post | Code|
 | ------------- |:-------------|
+| [CVPR 2024 Research Papers - Part 2](https://learnopencv.com/cvpr-2024-research-papers) | [Code](https://github.com/spmallick/learnopencv/tree/master/cvpr-2024-research-papers-part2) |
 | [CVPR 2024: An Overview and Key Papers](https://learnopencv.com/cvpr2024/) | [Code](https://github.com/spmallick/learnopencv/tree/master/CVPR-2024) |
 | [Object Detection on Edge Device - OAK-D-Lite](https://learnopencv.com/object-detection-on-edge-device) | [Code](https://github.com/spmallick/learnopencv/tree/master/Object-Detection-on-Edge-Devices) |
 | [Fine-Tuning YOLOv10 Models on Custom Dataset](https://learnopencv.com/fine-tuning-yolov10/) | [Code](https://github.com/spmallick/learnopencv/tree/master/Fine-Tuning-YOLOv10-Models-Custom-Dataset) |
Lines changed: 321 additions & 0 deletions
@@ -0,0 +1,321 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "_OcPAA8FUqJ8",
    "outputId": "30b4dc55-9365-4336-ea3a-5ec100542a29"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Cloning into 'DocRes'...\n",
      "remote: Enumerating objects: 243, done.\u001b[K\n",
      "remote: Counting objects: 100% (30/30), done.\u001b[K\n",
      "remote: Compressing objects: 100% (24/24), done.\u001b[K\n",
      "remote: Total 243 (delta 11), reused 15 (delta 6), pack-reused 213\u001b[K\n",
      "Receiving objects: 100% (243/243), 223.36 MiB | 2.90 MiB/s, done.\n",
      "Resolving deltas: 100% (22/22), done.\n",
      "Updating files: 100% (137/137), done.\n"
     ]
    }
   ],
   "source": [
    "!git clone https://github.com/ZZZHANG-jx/DocRes.git"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "mbmRFo-mUu3y",
    "outputId": "94bfcce6-4d41-45f6-8864-38b1c42e65b2"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/home/jaykumaran/Blogs/CVPR 2024/DocRes/DocRes\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/jaykumaran/.local/lib/python3.10/site-packages/IPython/core/magics/osm.py:417: UserWarning: This is now an optional IPython functionality, setting dhist requires you to install the `pickleshare` library.\n",
      " self.shell.db['dhist'] = compress_dhist(dhist)[-100:]\n"
     ]
    }
   ],
   "source": [
    "%cd DocRes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "jM6ZYPA6VCNn",
    "outputId": "dc4e9ca9-48d4-4942-fc2d-4c0903261be9"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.6/1.6 GB\u001b[0m \u001b[31m1.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
      "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m22.3/22.3 MB\u001b[0m \u001b[31m50.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
      "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m15.9/15.9 MB\u001b[0m \u001b[31m49.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
      "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m43.2/43.2 kB\u001b[0m \u001b[31m4.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
      "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m4.5/4.5 MB\u001b[0m \u001b[31m101.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
      "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m6.8/6.8 MB\u001b[0m \u001b[31m103.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
      "\u001b[?25h\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
      "chex 0.1.86 requires numpy>=1.24.1, but you have numpy 1.21.6 which is incompatible.\n",
      "cudf-cu12 24.4.1 requires numpy<2.0a0,>=1.23, but you have numpy 1.21.6 which is incompatible.\n",
      "flax 0.8.4 requires numpy>=1.22, but you have numpy 1.21.6 which is incompatible.\n",
      "jax 0.4.26 requires numpy>=1.22, but you have numpy 1.21.6 which is incompatible.\n",
      "jaxlib 0.4.26+cuda12.cudnn89 requires numpy>=1.22, but you have numpy 1.21.6 which is incompatible.\n",
      "numba 0.58.1 requires numpy<1.27,>=1.22, but you have numpy 1.21.6 which is incompatible.\n",
      "numexpr 2.10.1 requires numpy>=1.23.0, but you have numpy 1.21.6 which is incompatible.\n",
      "pandas-stubs 2.0.3.230814 requires numpy>=1.25.0; python_version >= \"3.9\", but you have numpy 1.21.6 which is incompatible.\n",
      "plotnine 0.12.4 requires numpy>=1.23.0, but you have numpy 1.21.6 which is incompatible.\n",
      "rmm-cu12 24.4.0 requires numpy<2.0a0,>=1.23, but you have numpy 1.21.6 which is incompatible.\n",
      "statsmodels 0.14.2 requires numpy>=1.22.3, but you have numpy 1.21.6 which is incompatible.\n",
      "tensorflow 2.15.0 requires numpy<2.0.0,>=1.23.5, but you have numpy 1.21.6 which is incompatible.\n",
      "torchaudio 2.3.0+cu121 requires torch==2.3.0, but you have torch 1.11.0+cu113 which is incompatible.\n",
      "torchtext 0.18.0 requires torch>=2.3.0, but you have torch 1.11.0+cu113 which is incompatible.\n",
      "xarray-einstats 0.7.0 requires numpy>=1.22, but you have numpy 1.21.6 which is incompatible.\u001b[0m\u001b[31m\n",
      "\u001b[0m"
     ]
    }
   ],
   "source": [
    "# !pip install -r requirements.txt -q"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "id": "p_DZoipl8OIL"
   },
   "outputs": [],
   "source": [
    "import os\n",
    "os.makedirs(\"data/MBD/checkpoint\", exist_ok=True)\n",
    "os.makedirs(\"checkpoints\", exist_ok=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[0m\u001b[01;34mcheckpoints\u001b[0m/ inference.py \u001b[01;34mmodels\u001b[0m/ \u001b[01;34mrestorted\u001b[0m/\n",
      "\u001b[01;34mdata\u001b[0m/ \u001b[01;34minput\u001b[0m/ \u001b[01;34m__pycache__\u001b[0m/ start_train.sh\n",
      "eval.py LICENSE README.md train.py\n",
      "\u001b[01;34mimages\u001b[0m/ \u001b[01;34mloaders\u001b[0m/ requirements.txt utils.py\n"
     ]
    }
   ],
   "source": [
    "ls"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "wzrFPppj0bqF",
    "outputId": "82ac1852-9ecc-42b1-c801-9dbbe093de6a"
   },
   "outputs": [],
   "source": [
    "# Download the DocRes weights; the URL is quoted so the shell does not treat '&' in the query string as a background operator\n",
    "!wget -O checkpoints/docres.pkl \"https://www.dropbox.com/scl/fi/7jy1040wa6rkaj74blbla/docres.pkl?rlkey=by6wjr8en5r3wxrrx4don2911&st=4tawdvez&dl=1\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "CsN8CPCd71k4",
    "outputId": "be0f48fb-2080-44c8-bc65-ea85179b1055"
   },
   "outputs": [],
   "source": [
    "# Download the MBD weights into the data/MBD/checkpoint directory created above\n",
    "!wget -O data/MBD/checkpoint/mbd.pkl \"https://www.dropbox.com/scl/fi/xtbxj8qn3qofuo9qcmqrt/mbd.pkl?rlkey=fa4fvzjtmwtd85s00eqdagufm&st=i1okanou&dl=1\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 35
    },
    "id": "EZzoKoq78mtj",
    "outputId": "3cba9c60-13df-4cb5-bdd4-d9bd4b054074"
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'/home/jaykumaran/Blogs/CVPR 2024/DocRes/DocRes'"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pwd"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# TASK: dewarping"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "Pk4ZSRMeVDK7",
    "outputId": "0b8f888b-39e4-4506-8d6d-86a25e03be20",
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "!python inference.py --im_path ./input/218_in.png --task dewarping --save_dtsprompt 1"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# TASK: deshadowing"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "^C\n"
     ]
    }
   ],
   "source": [
    "!python inference.py --im_path ./input/218_in.png --task deshadowing --save_dtsprompt 1"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# TASK: appearance enhancement"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "!python inference.py --im_path ./input/218_in.png --task appearance --save_dtsprompt 1"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# TASK: deblurring"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "!python inference.py --im_path ./input/218_in.png --task deblurring --save_dtsprompt 1"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# TASK: binarization"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "!python inference.py --im_path ./input/218_in.png --task binarization --save_dtsprompt 1"
   ]
  }
 ],
 "metadata": {
  "accelerator": "GPU",
  "colab": {
   "gpuType": "T4",
   "provenance": []
  },
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
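
A quick sanity check can be run before the inference cells to confirm that both downloads landed where the notebook expects them. The sketch below is not part of the committed notebook; it only reuses the checkpoint paths created and downloaded in the cells above:

import os

# Weight files fetched by the wget cells above
checkpoints = {
    "DocRes model": "checkpoints/docres.pkl",
    "MBD model": "data/MBD/checkpoint/mbd.pkl",
}

for name, path in checkpoints.items():
    if os.path.isfile(path):
        # Report the size so a truncated download is easy to spot
        print(f"{name}: {path} ({os.path.getsize(path) / 1e6:.1f} MB)")
    else:
        print(f"{name}: MISSING at {path}, re-run the matching wget cell")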
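After an inference cell finishes, its result is written inside the repository (the ls output above shows a restorted/ folder alongside inference.py). A minimal viewer sketch, assuming only that the outputs are image files placed in that folder (the exact filenames depend on inference.py and are not fixed here), could look like this:

import glob

import cv2
import matplotlib.pyplot as plt

# Collect whatever images inference.py has written under restorted/
# (directory name copied from the repo listing above).
results = sorted(glob.glob("restorted/*.png")) + sorted(glob.glob("restorted/*.jpg"))

for path in results:
    img = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; matplotlib expects RGB
    plt.figure(figsize=(6, 8))
    plt.imshow(img)
    plt.title(path)
    plt.axis("off")
    plt.show()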
