Skip to content

Commit c9f29b5

Browse files
committed
updated notebooks:
1 parent d851f2d commit c9f29b5

File tree

3 files changed

+92
-58
lines changed

3 files changed

+92
-58
lines changed

colab_starter.ipynb

Lines changed: 58 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@
3131
"outputs": [],
3232
"source": [
3333
"%%bash\n",
34-
"git clone https://github.com/npatta01/pytorch-serving-workshop.git -b main"
34+
"git clone https://github.com/npatta01/pytorch-serving-workshop.git -b notebooks"
3535
]
3636
},
3737
{
@@ -79,6 +79,63 @@
7979
"source": [
8080
"!pip install -r pytorch-serving-workshop/requirements.txt"
8181
]
82+
},
83+
{
84+
"cell_type": "code",
85+
"execution_count": null,
86+
"id": "neither-shipping",
87+
"metadata": {},
88+
"outputs": [],
89+
"source": []
90+
},
91+
{
92+
"cell_type": "code",
93+
"execution_count": null,
94+
"id": "checked-priest",
95+
"metadata": {},
96+
"outputs": [],
97+
"source": [
98+
"!sudo apt-get install htop tmux -y"
99+
]
100+
},
101+
{
102+
"cell_type": "markdown",
103+
"id": "immune-institution",
104+
"metadata": {},
105+
"source": [
106+
"## Colabcode\n",
107+
"Colab doesn't support multiple notebooks, so we use colabcode to set up vscode + ngrok"
108+
]
109+
},
110+
{
111+
"cell_type": "code",
112+
"execution_count": null,
113+
"id": "insured-navigator",
114+
"metadata": {},
115+
"outputs": [],
116+
"source": [
117+
"!pip install colabcode==0.3.0"
118+
]
119+
},
120+
{
121+
"cell_type": "code",
122+
"execution_count": null,
123+
"id": "capital-major",
124+
"metadata": {},
125+
"outputs": [],
126+
"source": [
127+
"from colabcode import ColabCode\n"
128+
]
129+
},
130+
{
131+
"cell_type": "code",
132+
"execution_count": null,
133+
"id": "operational-spotlight",
134+
"metadata": {},
135+
"outputs": [],
136+
"source": [
137+
"ColabCode(port=10000, mount_drive=False)"
138+
]
82139
}
83140
],
84141
"metadata": {

notebooks/03_optimizing_model.ipynb

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -880,6 +880,9 @@
880880
"source": [
881881
"def time_model_evaluation(model, dataset=test_dataset,device:str='cpu'):\n",
882882
"\n",
883+
" if device ==\"cuda\" and not torch.cuda.is_available():\n",
884+
" return \n",
885+
"\n",
883886
" eval_start_time = time.time()\n",
884887
" \n",
885888
" result = utils.prediction_batch(model , dataset, device=device )\n",
@@ -890,6 +893,9 @@
890893
" print(\"Evaluate total time (ms): {0:.1f}\".format(eval_duration_time_ms))\n",
891894
" \n",
892895
"def time_model_evaluation_single(model,dataset=test_dataset, device:str='cpu'):\n",
896+
"\n",
897+
" if device ==\"cuda\" and not torch.cuda.is_available():\n",
898+
" return \n",
893899
" \n",
894900
" model = model.to(device)\n",
895901
"\n",
@@ -969,7 +975,6 @@
969975
],
970976
"source": [
971977
"%%timeit -r 3 -n 5\n",
972-
"\n",
973978
"time_model_evaluation_single(model, device='cuda')"
974979
]
975980
},

notebooks/04_packaging.ipynb

Lines changed: 28 additions & 56 deletions
Original file line numberDiff line numberDiff line change
@@ -498,6 +498,10 @@
498498
"source": [
499499
"%%bash\n",
500500
"cd .. \n",
501+
"\n",
502+
"rm -rf serving/model_store\n",
503+
"mkdir -p serving/model_store\n",
504+
"\n",
501505
"cp artifacts/model/distilbert-base-uncased__trace/model_store/* serving/model_store\n",
502506
"cp artifacts/model/distilbert-base-uncased__trace/setup_config.json serving/model_store/"
503507
]
@@ -548,7 +552,7 @@
548552
},
549553
{
550554
"cell_type": "code",
551-
"execution_count": 8,
555+
"execution_count": 1,
552556
"id": "pretty-graphic",
553557
"metadata": {},
554558
"outputs": [],
@@ -562,7 +566,7 @@
562566
},
563567
{
564568
"cell_type": "code",
565-
"execution_count": 9,
569+
"execution_count": 2,
566570
"id": "missing-champagne",
567571
"metadata": {},
568572
"outputs": [
@@ -580,24 +584,24 @@
580584
},
581585
{
582586
"cell_type": "code",
583-
"execution_count": 10,
587+
"execution_count": 3,
584588
"id": "endangered-responsibility",
585589
"metadata": {},
586590
"outputs": [
587591
{
588592
"name": "stdout",
589593
"output_type": "stream",
590594
"text": [
591-
"2021-10-25 16:47:59,303 [INFO ] W-9001-pt_classifier_1.0-stdout MODEL_LOG - Torch worker started.\r\n",
592-
"2021-10-25 16:47:59,303 [INFO ] W-9001-pt_classifier_1.0-stdout MODEL_LOG - Python runtime: 3.7.10\r\n",
593-
"2021-10-25 16:47:59,306 [INFO ] W-9001-pt_classifier_1.0-stdout MODEL_LOG - Connection accepted: /tmp/.ts.sock.9001.\r\n",
594-
"2021-10-25 16:47:59,311 [INFO ] W-9001-pt_classifier_1.0-stdout MODEL_LOG - model_name: pt_classifier, batchSize: 1\r\n",
595-
"2021-10-25 16:47:59,357 [INFO ] W-9003-pt_classifier_1.0-stdout MODEL_LOG - Listening on port: /tmp/.ts.sock.9003\r\n",
596-
"2021-10-25 16:47:59,358 [INFO ] W-9003-pt_classifier_1.0-stdout MODEL_LOG - [PID]6980\r\n",
597-
"2021-10-25 16:47:59,358 [INFO ] W-9003-pt_classifier_1.0-stdout MODEL_LOG - Torch worker started.\r\n",
598-
"2021-10-25 16:47:59,358 [INFO ] W-9003-pt_classifier_1.0-stdout MODEL_LOG - Python runtime: 3.7.10\r\n",
599-
"2021-10-25 16:47:59,359 [INFO ] W-9003-pt_classifier_1.0-stdout MODEL_LOG - Connection accepted: /tmp/.ts.sock.9003.\r\n",
600-
"2021-10-25 16:47:59,364 [INFO ] W-9003-pt_classifier_1.0-stdout MODEL_LOG - model_name: pt_classifier, batchSize: 1\r\n"
595+
"2021-10-25 17:52:13,208 [INFO ] W-9000-pt_classifier_1.0-stdout MODEL_LOG - Connection accepted: /tmp/.ts.sock.9000.\r\n",
596+
"2021-10-25 17:52:13,233 [INFO ] W-9000-pt_classifier_1.0-stdout MODEL_LOG - model_name: pt_classifier, batchSize: 1\r\n",
597+
"2021-10-25 17:52:13,676 [INFO ] W-9000-pt_classifier_1.0-stdout MODEL_LOG - Transformers version 4.11.1\r\n",
598+
"2021-10-25 17:52:35,354 [INFO ] W-9000-pt_classifier_1.0-stdout MODEL_LOG - Transformer model from path /tmp/models/84448fbb0cf64f8fa122a52b62531894 loaded successfully\r\n",
599+
"2021-10-25 17:53:10,874 [INFO ] W-9000-pt_classifier_1.0-stdout MODEL_LOG - Received text: 'herbal tea'\r\n",
600+
"2021-10-25 17:53:10,874 [WARN ] W-9000-pt_classifier_1.0-stderr MODEL_LOG - Truncation was not explicitly activated but `max_length` is provided a specific value, please use `truncation=True` to explicitly truncate examples to max length. Defaulting to 'longest_first' truncation strategy. If you encode pairs of sequences (GLUE-style) with the tokenizer you can select this strategy more precisely by providing a specific strategy to `truncation`.\r\n",
601+
"2021-10-25 17:53:19,419 [INFO ] W-9000-pt_classifier_1.0-stdout MODEL_LOG - Received text: 'herbal tea'\r\n",
602+
"2021-10-25 17:53:23,218 [INFO ] W-9000-pt_classifier_1.0-stdout MODEL_LOG - Received text: 'herbal tea'\r\n",
603+
"2021-10-25 17:53:25,753 [INFO ] W-9000-pt_classifier_1.0-stdout MODEL_LOG - Received text: 'herbal tea'\r\n",
604+
"2021-10-25 17:53:27,878 [INFO ] W-9000-pt_classifier_1.0-stdout MODEL_LOG - Received text: 'Herbal Tea'\r\n"
601605
]
602606
}
603607
],
@@ -607,7 +611,7 @@
607611
},
608612
{
609613
"cell_type": "code",
610-
"execution_count": 11,
614+
"execution_count": 4,
611615
"id": "forbidden-marriage",
612616
"metadata": {},
613617
"outputs": [
@@ -638,7 +642,7 @@
638642
},
639643
{
640644
"cell_type": "code",
641-
"execution_count": null,
645+
"execution_count": 5,
642646
"id": "sixth-gardening",
643647
"metadata": {},
644648
"outputs": [],
@@ -664,7 +668,7 @@
664668
},
665669
{
666670
"cell_type": "code",
667-
"execution_count": 12,
671+
"execution_count": 6,
668672
"id": "false-council",
669673
"metadata": {},
670674
"outputs": [
@@ -705,7 +709,7 @@
705709
},
706710
{
707711
"cell_type": "code",
708-
"execution_count": 13,
712+
"execution_count": 7,
709713
"id": "fancy-rings",
710714
"metadata": {},
711715
"outputs": [
@@ -729,10 +733,10 @@
729733
" \"id\": \"9000\",\r\n",
730734
" \"startTime\": \"2021-10-25T17:52:12.170Z\",\r\n",
731735
" \"status\": \"READY\",\r\n",
732-
" \"memoryUsage\": 0,\r\n",
736+
" \"memoryUsage\": 3148378112,\r\n",
733737
" \"pid\": 6903,\r\n",
734738
" \"gpu\": true,\r\n",
735-
" \"gpuUsage\": \"gpuId::0 utilization.gpu [%]::0 % utilization.memory [%]::0 % memory.used [MiB]::1362 MiB\"\r\n",
739+
" \"gpuUsage\": \"gpuId::0 utilization.gpu [%]::0 % utilization.memory [%]::0 % memory.used [MiB]::1640 MiB\"\r\n",
736740
" }\r\n",
737741
" ]\r\n",
738742
" }\r\n",
@@ -762,7 +766,7 @@
762766
},
763767
{
764768
"cell_type": "code",
765-
"execution_count": 17,
769+
"execution_count": 8,
766770
"id": "norman-trader",
767771
"metadata": {},
768772
"outputs": [
@@ -779,7 +783,7 @@
779783
" \"SHOES\": 3.6940335121471435e-05\r\n",
780784
" }\r\n",
781785
"]\r\n",
782-
"elasped time (sec):0.035052\r\n"
786+
"elasped time (sec):0.039702\r\n"
783787
]
784788
}
785789
],
@@ -800,7 +804,7 @@
800804
},
801805
{
802806
"cell_type": "code",
803-
"execution_count": 18,
807+
"execution_count": 9,
804808
"id": "solid-internship",
805809
"metadata": {},
806810
"outputs": [
@@ -817,7 +821,7 @@
817821
" \"SHOES\": 3.6940335121471435e-05\n",
818822
" }\n",
819823
"]\n",
820-
"elasped time (sec):0.036023\n"
824+
"elasped time (sec):0.035042\n"
821825
]
822826
},
823827
{
@@ -828,7 +832,7 @@
828832
" Dload Upload Total Spent Left Speed\n",
829833
"\r",
830834
" 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r",
831-
"100 265 100 214 100 51 5940 1415 --:--:-- --:--:-- --:--:-- 7571\n"
835+
"100 265 100 214 100 51 6106 1455 --:--:-- --:--:-- --:--:-- 7794\n"
832836
]
833837
}
834838
],
@@ -848,38 +852,6 @@
848852
"metadata": {},
849853
"outputs": [],
850854
"source": []
851-
},
852-
{
853-
"cell_type": "code",
854-
"execution_count": null,
855-
"id": "sitting-attack",
856-
"metadata": {},
857-
"outputs": [],
858-
"source": []
859-
},
860-
{
861-
"cell_type": "code",
862-
"execution_count": null,
863-
"id": "positive-geography",
864-
"metadata": {},
865-
"outputs": [],
866-
"source": []
867-
},
868-
{
869-
"cell_type": "code",
870-
"execution_count": null,
871-
"id": "smart-tennis",
872-
"metadata": {},
873-
"outputs": [],
874-
"source": []
875-
},
876-
{
877-
"cell_type": "code",
878-
"execution_count": null,
879-
"id": "deluxe-nebraska",
880-
"metadata": {},
881-
"outputs": [],
882-
"source": []
883855
}
884856
],
885857
"metadata": {

0 commit comments

Comments
 (0)