1 1 {
2- "cells" : [
3- {
4- "cell_type" : " markdown" ,
5- "metadata" : {
6- "colab_type" : " text" ,
7- "id" : " view-in-github"
8- },
9- "source" : [
10- " <a href=\" https://colab.research.google.com/github/LostRuins/koboldcpp/blob/concedo/colab.ipynb\" target=\" _parent\" ><img src=\" https://colab.research.google.com/assets/colab-badge.svg\" alt=\" Open In Colab\" /></a>"
11- ]
12- },
13- {
14- "cell_type": "markdown",
15- "metadata": {
16- "id": "2FCn5tmpn3UV"
17- },
18- "source": [
19- "## Welcome to the Official KoboldCpp Colab Notebook\n",
20- "It's really easy to get started. Just press the two **Play** buttons below, and then connect to the **Cloudflare URL** shown at the end.\n",
21- "You can select a model from the dropdown, or enter a **custom URL** to a GGUF model (Example: `https://huggingface.co/KoboldAI/LLaMA2-13B-Tiefighter-GGUF/resolve/main/LLaMA2-13B-Tiefighter.Q4_K_M.gguf`)\n",
22- "\n",
23- "**Keep this page open and occasionally check for captchas so that your AI is not shut down**"
24- ]
25- },
26- {
27- "cell_type": "code",
28- "execution_count": null,
29- "metadata": {
30- "id": "QNaj3u0jn3UW"
31- },
32- "outputs": [],
33- "source": [
34- "#@title <-- Tap this if you play on Mobile { display-mode: \"form\" }\n",
35- "%%html\n",
36- "<b>Press play on the music player to keep the tab alive, then start KoboldCpp below</b><br/>\n",
37- "<audio autoplay=\"\" src=\"https://raw.githubusercontent.com/KoboldAI/KoboldAI-Client/main/colab/silence.m4a\" loop controls>"
38- ]
39- },
40- {
41- "cell_type": "code",
42- "execution_count": null,
43- "metadata": {
44- "cellView": "form",
45- "id": "uJS9i_Dltv8Y"
46- },
47- "outputs": [],
48- "source": [
49- "#@title <b>v-- Enter your model below and then click this to start KoboldCpp</b>\r\n",
50- "\r\n",
51- "Model = \"https://huggingface.co/KoboldAI/LLaMA2-13B-Tiefighter-GGUF/resolve/main/LLaMA2-13B-Tiefighter.Q4_K_S.gguf\" #@param [\"https://huggingface.co/KoboldAI/LLaMA2-13B-Tiefighter-GGUF/resolve/main/LLaMA2-13B-Tiefighter.Q4_K_S.gguf\",\"https://huggingface.co/KoboldAI/LLaMA2-13B-Estopia-GGUF/resolve/main/LLaMA2-13B-Estopia.Q4_K_S.gguf\",\"https://huggingface.co/mradermacher/Fimbulvetr-11B-v2-GGUF/resolve/main/Fimbulvetr-11B-v2.Q4_K_S.gguf\",\"https://huggingface.co/TheBloke/MythoMax-L2-13B-GGUF/resolve/main/mythomax-l2-13b.Q4_K_M.gguf\",\"https://huggingface.co/TheBloke/ReMM-SLERP-L2-13B-GGUF/resolve/main/remm-slerp-l2-13b.Q4_K_M.gguf\",\"https://huggingface.co/TheBloke/Xwin-LM-13B-v0.2-GGUF/resolve/main/xwin-lm-13b-v0.2.Q4_K_M.gguf\",\"https://huggingface.co/mradermacher/mini-magnum-12b-v1.1-GGUF/resolve/main/mini-magnum-12b-v1.1.Q4_K_S.gguf\",\"https://huggingface.co/TheBloke/Stheno-L2-13B-GGUF/resolve/main/stheno-l2-13b.Q4_K_M.gguf\",\"https://huggingface.co/TheBloke/MythoMax-L2-Kimiko-v2-13B-GGUF/resolve/main/mythomax-l2-kimiko-v2-13b.Q4_K_M.gguf\",\"https://huggingface.co/bartowski/Rocinante-12B-v1.1-GGUF/resolve/main/Rocinante-12B-v1.1-Q4_K_S.gguf\",\"https://huggingface.co/TheBloke/MistRP-Airoboros-7B-GGUF/resolve/main/mistrp-airoboros-7b.Q4_K_S.gguf\",\"https://huggingface.co/TheBloke/airoboros-mistral2.2-7B-GGUF/resolve/main/airoboros-mistral2.2-7b.Q4_K_S.gguf\",\"https://huggingface.co/concedo/KobbleTinyV2-1.1B-GGUF/resolve/main/KobbleTiny-Q4_K.gguf\",\"https://huggingface.co/grimjim/kukulemon-7B-GGUF/resolve/main/kukulemon-7B.Q8_0.gguf\",\"https://huggingface.co/mradermacher/LemonKunoichiWizardV3-GGUF/resolve/main/LemonKunoichiWizardV3.Q4_K_M.gguf\",\"https://huggingface.co/Lewdiculous/Kunoichi-DPO-v2-7B-GGUF-Imatrix/resolve/main/Kunoichi-DPO-v2-7B-Q4_K_M-imatrix.gguf\",\"https://huggingface.co/mradermacher/L3-8B-Stheno-v3.2-i1-GGUF/resolve/main/L3-8B-Stheno-v3.2.i1-Q4_K_M.gguf\",\"https://huggingface.co/Lewdiculous/Llama-3-Lumimaid-8B-v0.1-OAS-GGUF-IQ-Imatrix/resolve/main/v2-Llama-3-Lumimaid-8B-v0.1-OAS-Q4_K_M-imat.gguf\",\"https://huggingface.co/bartowski/NeuralDaredevil-8B-abliterated-GGUF/resolve/main/NeuralDaredevil-8B-abliterated-Q4_K_M.gguf\",\"https://huggingface.co/bartowski/L3-8B-Lunaris-v1-GGUF/resolve/main/L3-8B-Lunaris-v1-Q4_K_M.gguf\",\"https://huggingface.co/mradermacher/L3-Umbral-Mind-RP-v2.0-8B-GGUF/resolve/main/L3-Umbral-Mind-RP-v2.0-8B.Q4_K_M.gguf\"]{allow-input: true}\r\n",
52- " Layers = 99 #@param [99]{allow-input: true}\r\n " ,
53- " ContextSize = 4096 #@param [4096,8192] {allow-input: true}\r\n " ,
54- " FlashAttention = True #@param {type:\" boolean\" }\r\n " ,
55- " FACommand = \"\"\r\n " ,
56- " #@markdown <hr>\r\n " ,
57- " LoadVisionMMProjector = False #@param {type:\" boolean\" }\r\n " ,
58- " Mmproj = \" https://huggingface.co/koboldcpp/mmproj/resolve/main/llama-13b-mmproj-v1.5.Q4_1.gguf\" #@param [\" https://huggingface.co/koboldcpp/mmproj/resolve/main/llama-13b-mmproj-v1.5.Q4_1.gguf\" ,\" https://huggingface.co/koboldcpp/mmproj/resolve/main/mistral-7b-mmproj-v1.5-Q4_1.gguf\" ,\" https://huggingface.co/koboldcpp/mmproj/resolve/main/llama-7b-mmproj-v1.5-Q4_0.gguf\" ,\" https://huggingface.co/koboldcpp/mmproj/resolve/main/LLaMA3-8B_mmproj-Q4_1.gguf\" ]{allow-input: true}\r\n " ,
59- " VCommand = \"\"\r\n " ,
60- " #@markdown <hr>\r\n " ,
61- " LoadImgModel = False #@param {type:\" boolean\" }\r\n " ,
62- " ImgModel = \" https://huggingface.co/koboldcpp/imgmodel/resolve/main/imgmodel_ftuned_q4_0.gguf\" #@param [\" https://huggingface.co/koboldcpp/imgmodel/resolve/main/imgmodel_ftuned_q4_0.gguf\" ]{allow-input: true}\r\n " ,
63- " SCommand = \"\"\r\n " ,
64- " #@markdown <hr>\r\n " ,
65- " LoadSpeechModel = False #@param {type:\" boolean\" }\r\n " ,
66- " SpeechModel = \" https://huggingface.co/koboldcpp/whisper/resolve/main/whisper-base.en-q5_1.bin\" #@param [\" https://huggingface.co/koboldcpp/whisper/resolve/main/whisper-base.en-q5_1.bin\" ]{allow-input: true}\r\n " ,
67- " WCommand = \"\"\r\n " ,
68- " \r\n " ,
69- " import os\r\n " ,
70- " if not os.path.isfile(\" /opt/bin/nvidia-smi\" ):\r\n " ,
71- " raise RuntimeError(\" ⚠️Colab did not give you a GPU due to usage limits, this can take a few hours before they let you back in. Check out https://lite.koboldai.net for a free alternative (that does not provide an API link but can load KoboldAI saves and chat cards) or subscribe to Colab Pro for immediate access.⚠️\" )\r\n " ,
72- " \r\n " ,
73- " %cd /content\r\n " ,
74- " if Mmproj and LoadVisionMMProjector:\r\n " ,
75- " VCommand = \" --mmproj vmodel.gguf\"\r\n " ,
76- " else:\r\n " ,
77- " SCommand = \"\"\r\n " ,
78- " if ImgModel and LoadImgModel:\r\n " ,
79- " SCommand = \" --sdmodel imodel.gguf --sdthreads 4 --sdquant --sdclamped\"\r\n " ,
80- " else:\r\n " ,
81- " SCommand = \"\"\r\n " ,
82- " if SpeechModel and LoadSpeechModel:\r\n " ,
83- " WCommand = \" --whispermodel wmodel.bin\"\r\n " ,
84- " else:\r\n " ,
85- " WCommand = \"\"\r\n " ,
86- " if FlashAttention:\r\n " ,
87- " FACommand = \" --flashattention\"\r\n " ,
88- " else:\r\n " ,
89- " FACommand = \"\"\r\n " ,
90- " \r\n " ,
91- " !echo Downloading KoboldCpp, please wait...\r\n " ,
92- " !wget -O dlfile.tmp https://kcpplinux.concedo.workers.dev && mv dlfile.tmp koboldcpp_linux\r\n " ,
93- " !test -f koboldcpp_linux && echo Download Successful || echo Download Failed\r\n " ,
94- " !chmod +x ./koboldcpp_linux\r\n " ,
95- " !apt update\r\n " ,
96- " !apt install aria2 -y\r\n " ,
97- " # simple fix for a common URL mistake\r\n " ,
98- " if \" https://huggingface.co/\" in Model and \" /blob/main/\" in Model: \r\n " ,
99- " Model = Model.replace(\" /blob/main/\" , \" /resolve/main/\" )\r\n " ,
100- " !aria2c -x 10 -o model.gguf --summary-interval=5 --download-result=default --allow-overwrite=true --file-allocation=none $Model\r\n " ,
101- " if VCommand:\r\n " ,
102- " !aria2c -x 10 -o vmodel.gguf --summary-interval=5 --download-result=default --allow-overwrite=true --file-allocation=none $Mmproj\r\n " ,
103- " if SCommand:\r\n " ,
104- " !aria2c -x 10 -o imodel.gguf --summary-interval=5 --download-result=default --allow-overwrite=true --file-allocation=none $ImgModel\r\n " ,
105- " if WCommand:\r\n " ,
106- " !aria2c -x 10 -o wmodel.bin --summary-interval=5 --download-result=default --allow-overwrite=true --file-allocation=none $SpeechModel\r\n " ,
107- " !./koboldcpp_linux model.gguf --usecublas 0 mmq --multiuser --gpulayers $Layers --contextsize $ContextSize --quiet --remotetunnel $FACommand $VCommand $SCommand $WCommand\r\n "
108- ]
109- }
110- ],
111- "metadata": {
112- "accelerator": "GPU",
113- "colab": {
114- "cell_execution_strategy": "setup",
115- "gpuType": "T4",
116- "include_colab_link": true,
117- "private_outputs": true,
118- "provenance": []
119- },
120- "kernelspec": {
121- "display_name": "Python 3",
122- "name": "python3"
123- },
124- "language_info": {
125- "name": "python"
126- }
2+ "cells" : [
3+ {
4+ "cell_type" : " markdown" ,
5+ "metadata" : {
6+ "colab_type" : " text" ,
7+ "id" : " view-in-github"
8+ },
9+ "source" : [
10+ " <a href=\" https://colab.research.google.com/github/LostRuins/koboldcpp/blob/concedo/colab.ipynb\" target=\" _parent\" ><img src=\" https://colab.research.google.com/assets/colab-badge.svg\" alt=\" Open In Colab\" /></a>"
11+ ]
127 12 },
128- "nbformat" : 4 ,
129- "nbformat_minor" : 0
130- }
13+ {
14+ "cell_type": "markdown",
15+ "metadata": {
16+ "id": "2FCn5tmpn3UV"
17+ },
18+ "source": [
19+ "## Welcome to the Official KoboldCpp Colab Notebook\n",
20+ "It's really easy to get started. Just press the two **Play** buttons below, and then connect to the **Cloudflare URL** shown at the end.\n",
21+ "You can select a model from the dropdown, or enter a **custom URL** to a GGUF model (Example: `https://huggingface.co/KoboldAI/LLaMA2-13B-Tiefighter-GGUF/resolve/main/LLaMA2-13B-Tiefighter.Q4_K_M.gguf`)\n",
22+ "\n",
23+ "**Keep this page open and occasionally check for captchas so that your AI is not shut down**"
24+ ]
25+ },
26+ {
27+ "cell_type": "code",
28+ "execution_count": null,
29+ "metadata": {
30+ "id": "QNaj3u0jn3UW"
31+ },
32+ "outputs": [],
33+ "source": [
34+ "#@title <-- Tap this if you play on Mobile { display-mode: \"form\" }\n",
35+ "%%html\n",
36+ "<b>Press play on the music player to keep the tab alive, then start KoboldCpp below</b><br/>\n",
37+ "<audio autoplay=\"\" src=\"https://raw.githubusercontent.com/KoboldAI/KoboldAI-Client/main/colab/silence.m4a\" loop controls>"
38+ ]
39+ },
40+ {
41+ "cell_type": "code",
42+ "execution_count": null,
43+ "metadata": {
44+ "cellView": "form",
45+ "id": "uJS9i_Dltv8Y"
46+ },
47+ "outputs": [],
48+ "source": [
49+ "#@title <b>v-- Enter your model below and then click this to start KoboldCpp</b>\n",
50+ "\n",
51+ "Model = \"https://huggingface.co/KoboldAI/LLaMA2-13B-Tiefighter-GGUF/resolve/main/LLaMA2-13B-Tiefighter.Q4_K_S.gguf\" #@param [\"https://huggingface.co/KoboldAI/LLaMA2-13B-Tiefighter-GGUF/resolve/main/LLaMA2-13B-Tiefighter.Q4_K_S.gguf\",\"https://huggingface.co/KoboldAI/LLaMA2-13B-Estopia-GGUF/resolve/main/LLaMA2-13B-Estopia.Q4_K_S.gguf\",\"https://huggingface.co/mradermacher/Fimbulvetr-11B-v2-GGUF/resolve/main/Fimbulvetr-11B-v2.Q4_K_S.gguf\",\"https://huggingface.co/TheBloke/MythoMax-L2-13B-GGUF/resolve/main/mythomax-l2-13b.Q4_K_M.gguf\",\"https://huggingface.co/TheBloke/ReMM-SLERP-L2-13B-GGUF/resolve/main/remm-slerp-l2-13b.Q4_K_M.gguf\",\"https://huggingface.co/TheBloke/Xwin-LM-13B-v0.2-GGUF/resolve/main/xwin-lm-13b-v0.2.Q4_K_M.gguf\",\"https://huggingface.co/mradermacher/mini-magnum-12b-v1.1-GGUF/resolve/main/mini-magnum-12b-v1.1.Q4_K_S.gguf\",\"https://huggingface.co/TheBloke/Stheno-L2-13B-GGUF/resolve/main/stheno-l2-13b.Q4_K_M.gguf\",\"https://huggingface.co/TheBloke/MythoMax-L2-Kimiko-v2-13B-GGUF/resolve/main/mythomax-l2-kimiko-v2-13b.Q4_K_M.gguf\",\"https://huggingface.co/bartowski/Rocinante-12B-v1.1-GGUF/resolve/main/Rocinante-12B-v1.1-Q4_K_S.gguf\",\"https://huggingface.co/TheBloke/MistRP-Airoboros-7B-GGUF/resolve/main/mistrp-airoboros-7b.Q4_K_S.gguf\",\"https://huggingface.co/TheBloke/airoboros-mistral2.2-7B-GGUF/resolve/main/airoboros-mistral2.2-7b.Q4_K_S.gguf\",\"https://huggingface.co/concedo/KobbleTinyV2-1.1B-GGUF/resolve/main/KobbleTiny-Q4_K.gguf\",\"https://huggingface.co/grimjim/kukulemon-7B-GGUF/resolve/main/kukulemon-7B.Q8_0.gguf\",\"https://huggingface.co/mradermacher/LemonKunoichiWizardV3-GGUF/resolve/main/LemonKunoichiWizardV3.Q4_K_M.gguf\",\"https://huggingface.co/Lewdiculous/Kunoichi-DPO-v2-7B-GGUF-Imatrix/resolve/main/Kunoichi-DPO-v2-7B-Q4_K_M-imatrix.gguf\",\"https://huggingface.co/mradermacher/L3-8B-Stheno-v3.2-i1-GGUF/resolve/main/L3-8B-Stheno-v3.2.i1-Q4_K_M.gguf\",\"https://huggingface.co/Lewdiculous/Llama-3-Lumimaid-8B-v0.1-OAS-GGUF-IQ-Imatrix/resolve/main/v2-Llama-3-Lumimaid-8B-v0.1-OAS-Q4_K_M-imat.gguf\",\"https://huggingface.co/bartowski/NeuralDaredevil-8B-abliterated-GGUF/resolve/main/NeuralDaredevil-8B-abliterated-Q4_K_M.gguf\",\"https://huggingface.co/bartowski/L3-8B-Lunaris-v1-GGUF/resolve/main/L3-8B-Lunaris-v1-Q4_K_M.gguf\",\"https://huggingface.co/mradermacher/L3-Umbral-Mind-RP-v2.0-8B-GGUF/resolve/main/L3-Umbral-Mind-RP-v2.0-8B.Q4_K_M.gguf\"]{allow-input: true}\n",
52+ " Layers = 99 #@param [99]{allow-input: true}\n " ,
53+ " ContextSize = 4096 #@param [4096,8192] {allow-input: true}\n " ,
54+ " FlashAttention = True #@param {type:\" boolean\" }\n " ,
55+ " Multiplayer = False #@param {type:\" boolean\" }\n " ,
56+ " FACommand = \"\"\n " ,
57+ " MPCommand = \"\"\n " ,
58+ " #@markdown <hr>\n " ,
59+ " LoadVisionMMProjector = False #@param {type:\" boolean\" }\n " ,
60+ " Mmproj = \" https://huggingface.co/koboldcpp/mmproj/resolve/main/llama-13b-mmproj-v1.5.Q4_1.gguf\" #@param [\" https://huggingface.co/koboldcpp/mmproj/resolve/main/llama-13b-mmproj-v1.5.Q4_1.gguf\" ,\" https://huggingface.co/koboldcpp/mmproj/resolve/main/mistral-7b-mmproj-v1.5-Q4_1.gguf\" ,\" https://huggingface.co/koboldcpp/mmproj/resolve/main/llama-7b-mmproj-v1.5-Q4_0.gguf\" ,\" https://huggingface.co/koboldcpp/mmproj/resolve/main/LLaMA3-8B_mmproj-Q4_1.gguf\" ]{allow-input: true}\n " ,
61+ " VCommand = \"\"\n " ,
62+ " #@markdown <hr>\n " ,
63+ " LoadImgModel = False #@param {type:\" boolean\" }\n " ,
64+ " ImgModel = \" https://huggingface.co/koboldcpp/imgmodel/resolve/main/imgmodel_ftuned_q4_0.gguf\" #@param [\" https://huggingface.co/koboldcpp/imgmodel/resolve/main/imgmodel_ftuned_q4_0.gguf\" ]{allow-input: true}\n " ,
65+ " SCommand = \"\"\n " ,
66+ " #@markdown <hr>\n " ,
67+ " LoadSpeechModel = False #@param {type:\" boolean\" }\n " ,
68+ " SpeechModel = \" https://huggingface.co/koboldcpp/whisper/resolve/main/whisper-base.en-q5_1.bin\" #@param [\" https://huggingface.co/koboldcpp/whisper/resolve/main/whisper-base.en-q5_1.bin\" ]{allow-input: true}\n " ,
69+ " WCommand = \"\"\n " ,
70+ " \n " ,
71+ " import os\n " ,
72+ " if not os.path.isfile(\" /opt/bin/nvidia-smi\" ):\n " ,
73+ " raise RuntimeError(\" ⚠️Colab did not give you a GPU due to usage limits, this can take a few hours before they let you back in. Check out https://lite.koboldai.net for a free alternative (that does not provide an API link but can load KoboldAI saves and chat cards) or subscribe to Colab Pro for immediate access.⚠️\" )\n " ,
74+ " \n " ,
75+ " %cd /content\n " ,
76+ " if Mmproj and LoadVisionMMProjector:\n " ,
77+ " VCommand = \" --mmproj vmodel.gguf\"\n " ,
78+ " else:\n " ,
79+ " SCommand = \"\"\n " ,
80+ " if ImgModel and LoadImgModel:\n " ,
81+ " SCommand = \" --sdmodel imodel.gguf --sdthreads 4 --sdquant --sdclamped\"\n " ,
82+ " else:\n " ,
83+ " SCommand = \"\"\n " ,
84+ " if SpeechModel and LoadSpeechModel:\n " ,
85+ " WCommand = \" --whispermodel wmodel.bin\"\n " ,
86+ " else:\n " ,
87+ " WCommand = \"\"\n " ,
88+ " if FlashAttention:\n " ,
89+ " FACommand = \" --flashattention\"\n " ,
90+ " else:\n " ,
91+ " FACommand = \"\"\n " ,
92+ " if Multiplayer:\n " ,
93+ " MPCommand = \" --multiplayer\"\n " ,
94+ " else:\n " ,
95+ " MPCommand = \"\"\n " ,
96+ " \n " ,
97+ " !echo Downloading KoboldCpp, please wait...\n " ,
98+ " !wget -O dlfile.tmp https://kcpplinux.concedo.workers.dev && mv dlfile.tmp koboldcpp_linux\n " ,
99+ " !test -f koboldcpp_linux && echo Download Successful || echo Download Failed\n " ,
100+ " !chmod +x ./koboldcpp_linux\n " ,
101+ " !apt update\n " ,
102+ " !apt install aria2 -y\n " ,
103+ " # simple fix for a common URL mistake\n " ,
104+ " if \" https://huggingface.co/\" in Model and \" /blob/main/\" in Model:\n " ,
105+ " Model = Model.replace(\" /blob/main/\" , \" /resolve/main/\" )\n " ,
106+ " !aria2c -x 10 -o model.gguf --summary-interval=5 --download-result=default --allow-overwrite=true --file-allocation=none $Model\n " ,
107+ " if VCommand:\n " ,
108+ " !aria2c -x 10 -o vmodel.gguf --summary-interval=5 --download-result=default --allow-overwrite=true --file-allocation=none $Mmproj\n " ,
109+ " if SCommand:\n " ,
110+ " !aria2c -x 10 -o imodel.gguf --summary-interval=5 --download-result=default --allow-overwrite=true --file-allocation=none $ImgModel\n " ,
111+ " if WCommand:\n " ,
112+ " !aria2c -x 10 -o wmodel.bin --summary-interval=5 --download-result=default --allow-overwrite=true --file-allocation=none $SpeechModel\n " ,
113+ " !./koboldcpp_linux model.gguf --usecublas 0 mmq --multiuser --gpulayers $Layers --contextsize $ContextSize --websearch --quiet --remotetunnel $FACommand $MPCommand $VCommand $SCommand $WCommand\n "
114+ ]
115+ }
116+ ],
117+ "metadata" : {
118+ "accelerator" : " GPU" ,
119+ "colab" : {
120+ "cell_execution_strategy" : " setup" ,
121+ "gpuType" : " T4" ,
122+ "include_colab_link" : true ,
123+ "private_outputs" : true ,
124+ "provenance" : []
125+ },
126+ "kernelspec" : {
127+ "display_name" : " Python 3" ,
128+ "name" : " python3"
129+ },
130+ "language_info" : {
131+ "name" : " python"
132+ }
133+ },
134+ "nbformat" : 4 ,
135+ "nbformat_minor" : 0
136+ }