Skip to content

Commit b6d8599

Browse files
committed
Latest ruff
1 parent 24ff0d6 commit b6d8599

File tree

3 files changed

+5501
-23
lines changed

3 files changed

+5501
-23
lines changed

examples/binary_segmentation_intro.ipynb

Lines changed: 4211 additions & 1 deletion
Large diffs are not rendered by default.

examples/cars segmentation (camvid).ipynb

Lines changed: 1270 additions & 1 deletion
Large diffs are not rendered by default.

examples/save_load_model_and_share_with_hf_hub.ipynb

Lines changed: 20 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -64,14 +64,13 @@
6464
"# save the model\n",
6565
"model.save_pretrained(\n",
6666
" \"saved-model-dir/unet-with-metadata/\",\n",
67-
"\n",
6867
" # additional information to be saved with the model\n",
6968
" # only \"dataset\" and \"metrics\" are supported\n",
7069
" dataset=\"PASCAL VOC\", # only string name is supported\n",
71-
" metrics={ # should be a dictionary with metric name as key and metric value as value\n",
70+
" metrics={ # should be a dictionary with metric name as key and metric value as value\n",
7271
" \"mIoU\": 0.95,\n",
73-
" \"accuracy\": 0.96\n",
74-
" }\n",
72+
" \"accuracy\": 0.96,\n",
73+
" },\n",
7574
")"
7675
]
7776
},
@@ -222,13 +221,10 @@
222221
"# save the model and share it on the HF Hub (https://huggingface.co/models)\n",
223222
"model.save_pretrained(\n",
224223
" \"qubvel-hf/unet-with-metadata/\",\n",
225-
" push_to_hub=True, # <---------- push the model to the hub\n",
226-
"    private=False, # <---------- make the model private or public\n",
224+
" push_to_hub=True, # <---------- push the model to the hub\n",
225+
"    private=False,  # <---------- make the model private or public\n",
227226
" dataset=\"PASCAL VOC\",\n",
228-
" metrics={\n",
229-
" \"mIoU\": 0.95,\n",
230-
" \"accuracy\": 0.96\n",
231-
" }\n",
227+
" metrics={\"mIoU\": 0.95, \"accuracy\": 0.96},\n",
232228
")\n",
233229
"\n",
234230
"# see result here https://huggingface.co/qubvel-hf/unet-with-metadata"
@@ -267,10 +263,7 @@
267263
"outputs": [],
268264
"source": [
269265
"# define a preprocessing transform for image that would be used during inference\n",
270-
"preprocessing_transform = A.Compose([\n",
271-
" A.Resize(256, 256),\n",
272-
" A.Normalize()\n",
273-
"])\n",
266+
"preprocessing_transform = A.Compose([A.Resize(256, 256), A.Normalize()])\n",
274267
"\n",
275268
"model = smp.Unet()"
276269
]
@@ -367,15 +360,21 @@
367360
"# You can also save training augmentations to the Hub too (and load it back)!\n",
368361
"#! Just make sure to provide key=\"train\" when saving and loading the augmentations.\n",
369362
"\n",
370-
"train_augmentations = A.Compose([\n",
371-
" A.HorizontalFlip(p=0.5),\n",
372-
" A.RandomBrightnessContrast(p=0.2),\n",
373-
" A.ShiftScaleRotate(p=0.5),\n",
374-
"])\n",
363+
"train_augmentations = A.Compose(\n",
364+
" [\n",
365+
" A.HorizontalFlip(p=0.5),\n",
366+
" A.RandomBrightnessContrast(p=0.2),\n",
367+
" A.ShiftScaleRotate(p=0.5),\n",
368+
" ]\n",
369+
")\n",
375370
"\n",
376-
"train_augmentations.save_pretrained(directory_or_repo_on_the_hub, key=\"train\", push_to_hub=True)\n",
371+
"train_augmentations.save_pretrained(\n",
372+
" directory_or_repo_on_the_hub, key=\"train\", push_to_hub=True\n",
373+
")\n",
377374
"\n",
378-
"restored_train_augmentations = A.Compose.from_pretrained(directory_or_repo_on_the_hub, key=\"train\")\n",
375+
"restored_train_augmentations = A.Compose.from_pretrained(\n",
376+
" directory_or_repo_on_the_hub, key=\"train\"\n",
377+
")\n",
379378
"print(restored_train_augmentations)"
380379
]
381380
},

0 commit comments

Comments
 (0)