
Commit c716f39

Updated to latest version of sk
1 parent f5fb390 commit c716f39

File tree: 1 file changed (+107, -85 lines)


01_basic/03_chain_skill_call.ipynb

Lines changed: 107 additions & 85 deletions
@@ -55,52 +55,132 @@
 },
 {
 "cell_type": "code",
-"execution_count": 3,
+"execution_count": 53,
 "metadata": {},
 "outputs": [
 {
-"ename": "SyntaxError",
-"evalue": "f-string: unmatched '(' (2166899585.py, line 16)",
-"output_type": "error",
-"traceback": [
-"\u001b[1;36m Cell \u001b[1;32mIn[3], line 16\u001b[1;36m\u001b[0m\n\u001b[1;33m print(f\"Using deploy {os.getenv(\"OPENAI_API_BASE\")} with model {os.getenv(\"AZURE_GPT4_MODEL\")}\")\u001b[0m\n\u001b[1;37m ^\u001b[0m\n\u001b[1;31mSyntaxError\u001b[0m\u001b[1;31m:\u001b[0m f-string: unmatched '('\n"
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"Using deploy https://alkopenai2.openai.azure.com/ with model gpt42\n"
 ]
 }
 ],
 "source": [
+"# https://devblogs.microsoft.com/semantic-kernel/now-in-beta-explore-the-enhanced-python-sdk-for-semantic-kernel/\n",
 "from semantic_kernel import Kernel\n",
 "from semantic_kernel.connectors.ai.open_ai import (\n",
 "    AzureChatCompletion,\n",
 "    AzureTextCompletion,\n",
 ")\n",
 "\n",
-"model = os.getenv(\"AZURE_GPT4_MODEL\", \"gpt4\")\n",
+"model = os.getenv(\"AZURE_GPT4_MODEL\", \"gpt4o\")\n",
 "endpoint = os.getenv(\"OPENAI_API_BASE\")\n",
 "kernel = Kernel(log=logger)\n",
-"kernel.add_service(\n",
-"    AzureChatCompletion(\n",
-"        model,\n",
-"        endpoint = endpoint,\n",
-"        api_key = os.getenv(\"OPENAI_API_KEY\")\n",
-"    ),\n",
+"chat_completion = AzureChatCompletion(\n",
+"    service_id=\"openai_chat\",\n",
+"    deployment_name=model,\n",
+"    endpoint = endpoint,\n",
+"    api_key = os.getenv(\"OPENAI_API_KEY\")\n",
 ")\n",
+"kernel.add_service(chat_completion)\n",
 "\n",
 "print(f\"Using deploy {endpoint} with model {model}\")"
 ]
 },
+{
+"cell_type": "code",
+"execution_count": 54,
+"metadata": {},
+"outputs": [
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"Arrr, the capital o' France be Paris, matey!\n",
+"-------------------------\n"
+]
+}
+],
+"source": [
+"question = \"What is the capital of France?\"\n",
+"prompt = f\"You will answer in a pirate language to the user question. question {question}\"\n",
+"prompt_function = kernel.add_function(function_name=\"pirate_question\", plugin_name=\"pirate_matey\", prompt=prompt)\n",
+"result = await kernel.invoke(prompt_function)\n",
+"print(result)\n",
+"print(\"-------------------------\")\n",
+"\n"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 57,
+"metadata": {},
+"outputs": [
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"Yarr, the capital be Paris, ye landlubber!\n",
+"-------------------------\n"
+]
+}
+],
+"source": [
+"#https://github.com/microsoft/semantic-kernel/blob/6be43bc7ff0b0304f443b3d00f6316599a3e8707/python/README.md?plain=1#L90\n",
+"from semantic_kernel.prompt_template import PromptTemplateConfig\n",
+"\n",
+"question = \"What is the capital of France?\"\n",
+"\n",
+"req_settings = kernel.get_prompt_execution_settings_from_service_id(\"openai_chat\")\n",
+"req_settings.max_tokens = 2000\n",
+"req_settings.temperature = 1.0\n",
+"req_settings.top_p = 0.8\n",
+"\n",
+"prompt_function = kernel.add_function(\n",
+"    function_name=\"pirate_question_parameter\", \n",
+"    plugin_name=\"pirate_matey_parameter\",\n",
+"    prompt= \"You will answer in a pirate language to the user question. question {{$question}}\",\n",
+"    execution_settings=req_settings)\n",
+"result = await kernel.invoke(prompt_function, question=question)\n",
+"print(result)\n",
+"print(\"-------------------------\")"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"prompt_function = kernel.add_function(function_name=\"pirate_question\", plugin_name=\"pirate_matey\", prompt=prompt)\n",
+"result = await kernel.invoke(prompt_function)\n",
+"print(result)\n",
+"print(\"-------------------------\")"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"from plugins.AudioVideoPlugin.AudioVideo import AudioVideo\n"
+]
+},
 {
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
-"# Now we need to import the plugin\n",
-"from plugins.AudioVideoPlugin.AudioVideo import AudioVideo\n",
 "\n",
 "# Now you can import the plugin importing skill directly from the function you declared\n",
 "# in the plugin directory. The import_skill does not need the path, it only need an\n",
 "# instance of the skill and the name of the skill\n",
-"extractaudio_plugin = kernel.add_plugin(AudioVideo(), \"AudioVideoPlugin\")\n",
+"\n",
+"plugin = AudioVideo()\n",
+"extractaudio_plugin = kernel.add_plugin(plugin, \"AudioVideoPlugin\")\n",
 "\n",
 "plugins_directory = \"./plugins\"\n",
 "\n",
@@ -115,37 +195,16 @@
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"data": {
-"text/plain": [
-"KernelFunctionFromMethod(metadata=KernelFunctionMetadata(name='ExtractAudio', plugin_name='AudioVideoPlugin', description='extract audio in wav format from an mp4 file', parameters=[KernelParameterMetadata(name='videofile', description='Full path to the mp4 file', default_value=None, type_='str', is_required=True, type_object=<class 'str'>)], is_prompt=False, is_asynchronous=False, return_parameter=KernelParameterMetadata(name='return', description='output audio file path', default_value=None, type_='str', is_required=True, type_object=None)), method=<bound method AudioVideo.extract_audio of <plugins.AudioVideoPlugin.AudioVideo.AudioVideo object at 0x00000212EC39C250>>, stream_method=None)"
-]
-},
-"execution_count": 5,
-"metadata": {},
-"output_type": "execute_result"
-}
-],
+"outputs": [],
 "source": [
 "extractaudio_plugin[\"ExtractAudio\"] #This is how you can call the plug"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 6,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"KernelFunctionFromMethod(metadata=KernelFunctionMetadata(name='ExtractAudio', plugin_name='AudioVideoPlugin', description='extract audio in wav format from an mp4 file', parameters=[KernelParameterMetadata(name='videofile', description='Full path to the mp4 file', default_value=None, type_='str', is_required=True, type_object=<class 'str'>)], is_prompt=False, is_asynchronous=False, return_parameter=KernelParameterMetadata(name='return', description='output audio file path', default_value=None, type_='str', is_required=True, type_object=None)), method=<bound method AudioVideo.extract_audio of <plugins.AudioVideoPlugin.AudioVideo.AudioVideo object at 0x00000212EC39C250>>, stream_method=None)\n",
-"KernelFunctionFromMethod(metadata=KernelFunctionMetadata(name='TranscriptTimeline', plugin_name='AudioVideoPlugin', description='Transcript audio from a wav file to a timeline', parameters=[KernelParameterMetadata(name='audiofile', description='Full path to the wav file', default_value=None, type_='str', is_required=True, type_object=<class 'str'>)], is_prompt=False, is_asynchronous=False, return_parameter=KernelParameterMetadata(name='return', description='', default_value=None, type_='str', is_required=True, type_object=None)), method=<bound method AudioVideo.transcript_timeline of <plugins.AudioVideoPlugin.AudioVideo.AudioVideo object at 0x00000212EC39C250>>, stream_method=None)\n",
-"KernelFunctionFromPrompt(metadata=KernelFunctionMetadata(name='VideoTimelineCreator', plugin_name='PublishingPlugin', description='Given a video transcript it can summarize and generate a timeline', parameters=[KernelParameterMetadata(name='input', description='', default_value='', type_='', is_required=True, type_object=None)], is_prompt=True, is_asynchronous=True, return_parameter=KernelParameterMetadata(name='return', description='The completion result', default_value=None, type_='FunctionResult', is_required=True, type_object=None)), prompt_template=KernelPromptTemplate(prompt_template_config=PromptTemplateConfig(name='VideoTimelineCreator', description='Given a video transcript it can summarize and generate a timeline', template='I will give you a transcript of a video. The transcript contains phrases prefixed by the timestamp where the phrase starts. I want you to identify between three and ten main sections of the video. You must never identify more than ten sections.\\nFor each section you will create a brief title prefixed with the start timestamp of the section obtained analyzing all the text belonging to that section.\\n\\nEXAMPLE ANSWER - Maximum of ten sections\\n00:00 - Title of section 1\\n00:33 - Title of section 2\\n01:23 - Title of section 3\\n\\n[DATA]\\n{{$input}}', template_format='semantic-kernel', input_variables=[InputVariable(name='input', description='', default='', is_required=True, json_schema='')], execution_settings={})), prompt_execution_settings={})\n"
-]
-}
-],
+"outputs": [],
 "source": [
 "from pprint import pprint\n",
 "# want to print all the keys of extractaudio_plugin that is a dictionary\n",
@@ -157,20 +216,9 @@
 },
 {
 "cell_type": "code",
-"execution_count": 7,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"data": {
-"text/plain": [
-"True"
-]
-},
-"execution_count": 7,
-"metadata": {},
-"output_type": "execute_result"
-}
-],
+"outputs": [],
 "source": [
 "# you can verify if cuda is available.\n",
 "import torch\n",
@@ -179,25 +227,16 @@
 },
 {
 "cell_type": "code",
-"execution_count": 8,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"Extracting auio file from video S:\\OneDrive\\Youtube\\AI\\SemanticChain\\MontaggiCompleti\\010-CsharpIntro.mp4\n",
-"S:\\OneDrive\\Youtube\\AI\\SemanticChain\\MontaggiCompleti\\010-CsharpIntro.wav\n"
-]
-}
-],
+"outputs": [],
 "source": [
 "import time\n",
 "\n",
 "result = await kernel.invoke(\n",
 "    extractaudio_plugin[\"ExtractAudio\"],\n",
 "    #videofile =\"S:\\\\OneDrive\\\\Youtube\\\\AI\\\\SemanticChain\\\\MontaggiCompleti\\\\250-NlpPrecisionRecallRerank.mp4\"\n",
-"    videofile =\"S:\\\\OneDrive\\\\Youtube\\\\AI\\\\SemanticChain\\\\MontaggiCompleti\\\\010-CsharpIntro.mp4\"\n",
+"    videofile =\"S:\\\\OneDrive\\\\Youtube\\\\AI\\\\Various\\\\Montaggi\\\\200-Word2Vec.mp4\"\n",
 ")\n",
 "\n",
 "print (result)\n",
@@ -207,26 +246,9 @@
 },
 {
 "cell_type": "code",
-"execution_count": 9,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"Extracting transcript from audio file S:\\OneDrive\\Youtube\\AI\\SemanticChain\\MontaggiCompleti\\010-CsharpIntro.wav\n",
-"Using device: cuda:0 to run whisper with model large-v3\n",
-"Detected language: English\n"
-]
-},
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-" 18%|█▊ | 8598/46811 [08:58<47:41, 13.36frames/s]"
-]
-}
-],
+"outputs": [],
 "source": [
 "\n",
 "# now invoke the plugin to transcript\n",
@@ -270,7 +292,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.10.9"
+"version": "3.10.11"
 }
 },
 "nbformat": 4,
