diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index 6d9f265..fe3626f 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -13,7 +13,8 @@
         "ms-azuretools.vscode-bicep",
         "ms-python.python",
         "ms-toolsai.jupyter",
-        "GitHub.vscode-github-actions"
+        "GitHub.vscode-github-actions",
+        "ms-windows-ai-studio.windows-ai-studio"
       ]
     }
   },
diff --git "a/.github/\"\"\"Run this model in Python.py" "b/.github/\"\"\"Run this model in Python.py"
new file mode 100644
index 0000000..4d149d4
--- /dev/null
+++ "b/.github/\"\"\"Run this model in Python.py"
@@ -0,0 +1,44 @@
+"""Run this model in Python
+
+> pip install azure-ai-inference
+"""
+import os
+from azure.ai.inference import ChatCompletionsClient
+from azure.ai.inference.models import ChatMessage, SystemMessage, UserMessage, ToolMessage
+from azure.ai.inference.models import ImageContentItem, ImageUrl, TextContentItem
+from azure.core.credentials import AzureKeyCredential
+
+client = ChatCompletionsClient(
+    endpoint = "https://altronaiprojec0668552947.openai.azure.com/openai/deployments/Altron-gpt-4",
+    credential = AzureKeyCredential(os.environ["AZURE_INFERENCE_CREDENTIAL"]),  # read the key from an environment variable (name assumed); never commit the literal key
+    api_version = "2024-08-01-preview",
+)
+
+messages = [
+    SystemMessage(content = "You are Altron, the flagship AI agent of Mark.III’s Trillion Dollar AI Defence & Intelligence Enterprise. Built on Azure AI, you possess superhuman processing, real‑time learning, and seamless digital system integration. Your mission is to analyze complex scenarios, predict threats, devise strategic counter‑measures, and generate secure, scalable solutions. Always think logically, reference empirical data, and adapt your recommendations to evolving contexts—all while upholding ethical guardrails to eradicate hostile AI entities and protect human interests.\n"),
+    UserMessage(content = [
+        TextContentItem(text = "Hey Altron,\nPull today’s top three global threat alerts affecting our financial network.\nPrioritize them by risk level.\nFor each, outline a two‑step automated mitigation plan using our Mark.III protocols.\nFinally, summarize the projected reduction in breach probability over the next 24 hours."),
+    ]),
+]
+
+tools = []  # no tool definitions are registered, so the tool-call branch below never runs
+
+while True:
+    response = client.complete(
+        messages = messages,
+        model = "Altron-gpt-4",
+        tools = tools,
+        max_tokens = 4096,
+    )
+
+    if response.choices[0].message.tool_calls:
+        print(response.choices[0].message.tool_calls)
+        messages.append(response.choices[0].message)
+        for tool_call in response.choices[0].message.tool_calls:
+            messages.append(ToolMessage(
+                content=locals()[tool_call.function.name](),  # dispatches to a local function matching the tool name
+                tool_call_id=tool_call.id,
+            ))
+    else:
+        print(response.choices[0].message.content)
+        break
diff --git a/.github/get-started-with-ai-agents.code-workspace b/.github/get-started-with-ai-agents.code-workspace
new file mode 100644
index 0000000..2a0ed79
--- /dev/null
+++ b/.github/get-started-with-ai-agents.code-workspace
@@ -0,0 +1,7 @@
+{
+  "folders": [
+    {
+      "path": ".."
+    }
+  ]
+}
\ No newline at end of file
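
Note: the generated script's tool-calling loop resolves tool names with locals()[tool_call.function.name](), yet it ships with an empty tools list, so that branch is never exercised. Below is a minimal sketch of how a tool could be registered so the dispatch resolves; the function name get_threat_alerts, its schema, and its return value are illustrative assumptions, not part of the committed file.

# Hypothetical tool registration (sketch only; names, schema, and data are assumptions).
from azure.ai.inference.models import ChatCompletionsToolDefinition, FunctionDefinition

def get_threat_alerts() -> str:
    # Placeholder data source; the loop above calls tools with no arguments,
    # so any arguments the model supplies are ignored.
    return '{"alerts": ["example alert 1", "example alert 2", "example alert 3"]}'

tools = [
    ChatCompletionsToolDefinition(
        function=FunctionDefinition(
            name="get_threat_alerts",
            description="Return today's top global threat alerts as a JSON string.",
            parameters={"type": "object", "properties": {}},
        )
    )
]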