From d6609cbb73542d2b8bdd12515d80fc1bea4d7e7a Mon Sep 17 00:00:00 2001
From: Pritam Das <69068731+Pritam3355@users.noreply.github.com>
Date: Sat, 19 Oct 2024 01:02:11 +0530
Subject: [PATCH 1/8] Create README.md

---
 llm_experiments/README.md | 89 +++++++++++++++++++++++++++++++++
 1 file changed, 89 insertions(+)
 create mode 100644 llm_experiments/README.md

diff --git a/llm_experiments/README.md b/llm_experiments/README.md
new file mode 100644
index 000000000000..d49513f9d026
--- /dev/null
+++ b/llm_experiments/README.md
@@ -0,0 +1,89 @@
+
+
+
+# Mixtral-Experiment Series
+
+Welcome to the Mixtral-Experiment series! This series of notebooks and scripts aims to provide a comprehensive guide on investigating the internal workings of Large Language Models (LLMs), understanding how they process inputs, and experimenting with their architectures.
+
+## Table of Contents
+
+- [Introduction](#introduction)
+- [Series Overview](#series-overview)
+- [Getting Started](#getting-started)
+- [Notebooks and Scripts](#notebooks-and-scripts)
+- [Contributing](#contributing)
+- [License](#license)
+
+## Introduction
+
+Large Language Models (LLMs) have revolutionized the field of natural language processing (NLP) by achieving state-of-the-art performance on various tasks. However, understanding their internal workings and how they process inputs can be challenging. This series aims to demystify LLMs by providing detailed explanations, hands-on experiments, and practical tips for tweaking their architectures.
+
+## Series Overview
+
+The Mixtral-Experiment series will cover the following topics:
+
+1. **Understanding LLM Architectures**:
+   - An overview of popular LLM architectures like Transformers, BERT, and Mixtral.
+   - Detailed explanations of key components such as embedding layers, self-attention mechanisms, and Mixture of Experts (MoE) layers.
+
+2. **Investigating Input Processing**:
+   - How inputs are tokenized and embedded.
+   - The role of attention mechanisms in processing sequences.
+   - Visualizing and analyzing the outputs at various layers of the model.
+
+3. **Tweaking LLM Architectures**:
+   - Experimenting with different configurations and hyperparameters.
+   - Modifying existing LLM architectures to improve performance or adapt to specific tasks.
+   - Implementing custom layers and components.
+
+4. **Conducting New Experiments**:
+   - Designing and implementing new experiments to test hypotheses about LLM behavior.
+   - Evaluating the impact of architectural changes on model performance.
+   - Sharing insights and findings with the community.
+
+## Getting Started
+
+To get started with the LLM-Experiment series, you will need the following:
+
+1. **Python Environment**:
+   - All of these notebooks were created in Kaggle or Google Colab, so it is recommended to use the same platforms to reproduce the results for other models.
+
+
+2. **Hugging Face Account**:
+   - Create a Hugging Face account and obtain an API token.
+   - Log in to Hugging Face using the provided token, or with your username and token.
+   - Most of the Mistral and Llama models require accepting a license agreement before use.
+
+3. **Notebooks and Scripts**:
+   - Clone this repository to access the notebooks and scripts, or open them directly in Google Colab.
+   - Follow the instructions in each notebook to run the experiments and analyze the results.
+
+## Notebooks and Scripts
+
+The series will include the following notebooks and scripts:
+
+1. **Mixtral Model Analysis**:
+   - Analyzing the architecture and configuration of the Mixtral model.
+   - Registering hooks to capture the outputs at various layers.
+
+2. **Input Processing and Embedding**: Upcoming
+
+
+3. **Attention Mechanisms and Improvements**: Upcoming
+
+
+4. **Rolling Buffer, KV-Cache, Sliding Window Attention**: Upcoming
+
+
+5. **Tweaking Model Architectures - Adapters, Down-Casting**: Upcoming
+
+
+## Contributing
+
+We welcome contributions from the community! If you have any ideas, suggestions, or improvements, please feel free to open an issue or submit a pull request.
+
+## License
+
+This project is licensed under the MIT License. See the [LICENSE](LICENSE) file for more details.
+
+

From 28b1f02fef762f2a35d07258981bbe1afd5e0c1b Mon Sep 17 00:00:00 2001
From: Pritam Das <69068731+Pritam3355@users.noreply.github.com>
Date: Sat, 19 Oct 2024 01:06:34 +0530
Subject: [PATCH 2/8] Update README.md

---
 llm_experiments/README.md | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/llm_experiments/README.md b/llm_experiments/README.md
index d49513f9d026..06283bf31a07 100644
--- a/llm_experiments/README.md
+++ b/llm_experiments/README.md
@@ -1,9 +1,9 @@
 
 
 
-# Mixtral-Experiment Series
+# LLM-Experiment Series
 
-Welcome to the Mixtral-Experiment series! This series of notebooks and scripts aims to provide a comprehensive guide on investigating the internal workings of Large Language Models (LLMs), understanding how they process inputs, and experimenting with their architectures.
+Welcome to the LLM-Experiment series! This series of notebooks and scripts aims to provide a comprehensive guide on investigating the internal workings of Large Language Models (LLMs), understanding how they process inputs, and experimenting with their architectures.
 
 ## Table of Contents
 
@@ -20,11 +20,11 @@ Large Language Models (LLMs) have revolutionized the field of natural language p
 ## Series Overview
 
-The Mixtral-Experiment series will cover the following topics:
+The LLM-Experiment series will cover the following topics:
 
 1. **Understanding LLM Architectures**:
-   - An overview of popular LLM architectures like Transformers, BERT, and Mixtral.
-   - Detailed explanations of key components such as embedding layers, self-attention mechanisms, and Mixture of Experts (MoE) layers.
+   - An overview of popular open-source LLM architectures like Whisper, Llama, and Mixtral.
+   - Key troubleshooting steps during experimentation.
 
 2. **Investigating Input Processing**:
    - How inputs are tokenized and embedded.
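As background for the analysis notebook introduced in the next patch: the per-layer outputs it prints are typically gathered with PyTorch forward hooks. Below is a minimal, self-contained sketch of that pattern on a toy two-layer model; the model and the `make_hook` helper are illustrative stand-ins, not the notebook's actual Mixtral hook code.

```python
import torch
import torch.nn as nn

# Toy stand-in for a transformer; with a real checkpoint the same hooks would
# be attached to submodules such as embed_tokens, q_proj, or lm_head.
model = nn.Sequential(nn.Embedding(100, 16), nn.Linear(16, 16))

outputs = {}

def make_hook(name):
    # A forward hook receives (module, inputs, output) after each forward call.
    def hook(module, inputs, output):
        outputs[name] = output.detach() if isinstance(output, torch.Tensor) else output
    return hook

# Register one hook per named submodule, keeping the handles for cleanup.
handles = [
    module.register_forward_hook(make_hook(name))
    for name, module in model.named_modules()
    if name  # skip the unnamed root module
]

with torch.no_grad():
    model(torch.tensor([[1, 2, 3]]))

for name, captured in outputs.items():
    print(f"Output at {name}: {captured.shape}")

for handle in handles:
    handle.remove()  # detach hooks once the activations are collected
```

With a real model, the registration loop simply filters `model.named_modules()` for the layer names of interest; this is how per-layer shapes like the ones analyzed in the notebook below can be captured.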
From 998eed43a3938bf6c5b2b7bd1f2496095249754c Mon Sep 17 00:00:00 2001 From: Pritam Das <69068731+Pritam3355@users.noreply.github.com> Date: Sat, 19 Oct 2024 01:08:56 +0530 Subject: [PATCH 3/8] Add files via upload --- llm_experiments/Mixtral_Experiment.ipynb | 965 +++++++++++++++++++++++ 1 file changed, 965 insertions(+) create mode 100644 llm_experiments/Mixtral_Experiment.ipynb diff --git a/llm_experiments/Mixtral_Experiment.ipynb b/llm_experiments/Mixtral_Experiment.ipynb new file mode 100644 index 000000000000..4903471af082 --- /dev/null +++ b/llm_experiments/Mixtral_Experiment.ipynb @@ -0,0 +1,965 @@ +{ + "metadata": { + "kernelspec": { + "language": "python", + "display_name": "Python 3", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.10.14", + "mimetype": "text/x-python", + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "pygments_lexer": "ipython3", + "nbconvert_exporter": "python", + "file_extension": ".py" + }, + "kaggle": { + "accelerator": "nvidiaTeslaT4", + "dataSources": [], + "dockerImageVersionId": 30787, + "isInternetEnabled": true, + "language": "python", + "sourceType": "notebook", + "isGpuEnabled": true + }, + "colab": { + "name": "Mixtral-Experiment", + "provenance": [] + } + }, + "nbformat_minor": 0, + "nbformat": 4, + "cells": [ + { + "cell_type": "code", + "source": [ + "from huggingface_hub import login\n", + "\n", + "# Fetch Hugging Face username and token from Colab secrets\n", + "HF_USERNAME = \"pritam3355\"\n", + "HF_TOKEN = \"HF_TOKEN\"\n", + "\n", + "# Login to Hugging Face\n", + "try:\n", + " login(token=HF_TOKEN)\n", + "except ValueError:\n", + " # If token is not valid or found, login with username and token\n", + " # (likely requires manual authorization)\n", + " login(username=HF_USERNAME, token=HF_TOKEN)" + ], + "metadata": { + "_uuid": "8f2839f25d086af736a60e9eeb907d3b93b6e0e5", + "_cell_guid": "b1076dfc-b9ad-4769-8c92-a6c4dae69d19", + "trusted": true, + "execution": { + "iopub.status.busy": "2024-10-18T18:20:43.271188Z", + "iopub.execute_input": "2024-10-18T18:20:43.271841Z", + "iopub.status.idle": "2024-10-18T18:20:43.353056Z", + "shell.execute_reply.started": "2024-10-18T18:20:43.271801Z", + "shell.execute_reply": "2024-10-18T18:20:43.35218Z" + }, + "id": "H5JWFz2XAAak", + "outputId": "af45db86-89f6-4349-c2d9-15d969f3d3f2" + }, + "outputs": [ + { + "name": "stdout", + "text": "The token has not been saved to the git credentials helper. Pass `add_to_git_credential=True` in this function directly or `--add-to-git-credential` if using via `huggingface-cli` if you want to set the git credential as well.\nToken is valid (permission: fineGrained).\nYour token has been saved to /root/.cache/huggingface/token\nLogin successful\n", + "output_type": "stream" + } + ], + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "Here Using Mixtral model to extract and analyze how the input sequence is processed in Forward pass. 
Mixtral is similar to Mistal model but has more parameters" + ], + "metadata": { + "id": "bLD_CkBUAQMy" + } + }, + { + "cell_type": "code", + "source": [ + "import torch\n", + "from transformers import AutoModelForCausalLM, AutoTokenizer\n", + "\n", + "model_id = \"mistralai/Mixtral-8x7B-v0.1\"\n", + "tokenizer = AutoTokenizer.from_pretrained(model_id,device=\"auto\")\n", + "\n", + "model = AutoModelForCausalLM.from_pretrained(model_id,trust_remote_code=True,\n", + " torch_dtype=torch.bfloat16,\n", + " low_cpu_mem_usage=True,device_map=\"auto\")\n" + ], + "metadata": { + "trusted": true, + "execution": { + "iopub.status.busy": "2024-10-18T18:20:43.354757Z", + "iopub.execute_input": "2024-10-18T18:20:43.355493Z", + "iopub.status.idle": "2024-10-18T18:30:40.651163Z", + "shell.execute_reply.started": "2024-10-18T18:20:43.355448Z", + "shell.execute_reply": "2024-10-18T18:30:40.650377Z" + }, + "id": "E3nm2iWcAAaq", + "outputId": "82d9a608-7eff-4578-b328-f9f773ed4f39", + "colab": { + "referenced_widgets": [ + "fa5c2b7f05bc412993098a3731e72989", + "e64a4b6de34d4f40b88305ce507e3658", + "4675ed906a964735b4334458935ab4b9", + "e4f24bafae8f4397b76818a34ca9d6e4", + "3621e97c28544d34ab3953c22d227cd0", + "dd02aa16c10b4ab78373aa3dae939489", + "44e75ecc95b74f03a7a58e6ea21165c1", + "6d26de44c0334077b6c14104747a48ad", + "57c7fa8051a94bcb96c0309651ab8298", + "b736720173fd4ba5bbe54cbcc1177423", + "368fe041fff84949ac30d3d45ac78a0d", + "79ff492b16e946c8a6238d31b181ffc8", + "2a12b5905b434c11beaaceaf7e1a6394", + "9f16b85fde7148b7931c30fb024c87d5", + "f0bae3fc9925442e82d58ecd7a305808", + "2181a83c39114bc78b1e4859b3ccdfed", + "14ad494e78084d8983bc6c0751f9d941", + "280600190e10484db98261256542f236", + "562e9f5c0d0d4228b218553019e483b6", + "cc6675e71cea4018b6adff29d60f0a82", + "39633f760e104265b1ddc2bcb3e4961d", + "64288ea1c3074a528339b9d0f9729d18", + "584114fa6b554a1495f6aa14011e0cc6", + "2756416bfbcf474c94c1ca2ab4b7d8e3", + "8c6e4f33682040feb42c1385c66b7ba2", + "68cc9722525c46328cf963c2a4f2740a", + "06367bbf0c094ba1bc7d481fb1bfc3f9", + "1434b26ed3b4449b8fd6a76e0f1e5c97" + ] + } + }, + "outputs": [ + { + "output_type": "display_data", + "data": { + "text/plain": "tokenizer_config.json: 0%| | 0.00/967 [00:00" + }, + "metadata": {} + } + ], + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "## Forward Pass" + ], + "metadata": { + "id": "vORmhXXTAAa3" + } + }, + { + "cell_type": "code", + "source": [ + "input_text = \"The quick brown fox jumps over the lazy dog !\"\n", + "\n", + "# Tokenize the input text\n", + "inputs = tokenizer(input_text, return_tensors=\"pt\")\n", + "print(\"Tokenized inputs {'input_ids','attention_mask'} - \",inputs)\n", + "print(\"Decoded tokens : \",tokenizer.decode(inputs['input_ids'][0]))" + ], + "metadata": { + "trusted": true, + "execution": { + "iopub.status.busy": "2024-10-18T18:30:40.706383Z", + "iopub.execute_input": "2024-10-18T18:30:40.706895Z", + "iopub.status.idle": "2024-10-18T18:30:40.728093Z", + "shell.execute_reply.started": "2024-10-18T18:30:40.706863Z", + "shell.execute_reply": "2024-10-18T18:30:40.727243Z" + }, + "id": "dcC4RjNTAAa4", + "outputId": "f0d4c2e3-b7f6-471d-9b9b-ce5316c47431" + }, + "outputs": [ + { + "name": "stdout", + "text": "Tokenized inputs {'input_ids','attention_mask'} - {'input_ids': tensor([[ 1, 415, 2936, 9060, 285, 1142, 461, 10575, 754, 272,\n 17898, 3914, 918]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])}\nDecoded tokens : The quick brown fox jumps over the lazy dog !\n", + "output_type": "stream" + 
} + ], + "execution_count": null + }, + { + "cell_type": "code", + "source": [ + "\n", + "with torch.no_grad():\n", + " model_output = model(**inputs)" + ], + "metadata": { + "trusted": true, + "execution": { + "iopub.status.busy": "2024-10-18T18:30:40.729287Z", + "iopub.execute_input": "2024-10-18T18:30:40.729872Z", + "iopub.status.idle": "2024-10-18T18:36:43.660892Z", + "shell.execute_reply.started": "2024-10-18T18:30:40.72983Z", + "shell.execute_reply": "2024-10-18T18:36:43.660087Z" + }, + "id": "4x2A5-m-AAa6", + "outputId": "d0fc43d2-1229-4582-d4d3-6b5f745be24e" + }, + "outputs": [ + { + "name": "stderr", + "text": "Starting from v4.46, the `logits` model output will have the same type as the model (except at train time, where it will always be FP32)\n", + "output_type": "stream" + } + ], + "execution_count": null + }, + { + "cell_type": "code", + "source": [ + "for layer, output in outputs.items():\n", + " print(f\"Output at {layer}: \")\n", + " if isinstance(output, torch.Tensor):\n", + " print(output.shape, type(output))\n", + " elif isinstance(output, tuple):\n", + " for i, o in enumerate(output):\n", + " print(f\"Output {i}: {o.shape if isinstance(o, torch.Tensor) else type(o)}\")\n", + " else:\n", + " print(type(output))\n", + " print(\"-\" * 100)" + ], + "metadata": { + "trusted": true, + "execution": { + "iopub.status.busy": "2024-10-18T18:56:11.28238Z", + "iopub.execute_input": "2024-10-18T18:56:11.283252Z", + "iopub.status.idle": "2024-10-18T18:56:11.291437Z", + "shell.execute_reply.started": "2024-10-18T18:56:11.283214Z", + "shell.execute_reply": "2024-10-18T18:56:11.290478Z" + }, + "id": "xVuaYV3pAAa7", + "outputId": "84e2f36e-0f10-4be1-9fdd-581fe61fabb1" + }, + "outputs": [ + { + "name": "stdout", + "text": "Output at embed_tokens: \ntorch.Size([1, 13, 4096]) \n----------------------------------------------------------------------------------------------------\nOutput at self_attn_layer_1: \nOutput 0: torch.Size([1, 13, 4096])\nOutput 1: \nOutput 2: \n----------------------------------------------------------------------------------------------------\nOutput at block_sparse_moe_experts: \ntorch.Size([3, 4096]) \n----------------------------------------------------------------------------------------------------\nOutput at post_attention_layernorm: \ntorch.Size([1, 13, 4096]) \n----------------------------------------------------------------------------------------------------\nOutput at norm: \ntorch.Size([1, 13, 4096]) \n----------------------------------------------------------------------------------------------------\nOutput at lm_head: \ntorch.Size([1, 13, 32000]) \n----------------------------------------------------------------------------------------------------\nOutput at input_layernorm: \ntorch.Size([1, 13, 4096]) \n----------------------------------------------------------------------------------------------------\nOutput at self_attn_q_proj: \ntorch.Size([1, 13, 4096]) \n----------------------------------------------------------------------------------------------------\nOutput at self_attn_k_proj: \ntorch.Size([1, 13, 1024]) \n----------------------------------------------------------------------------------------------------\nOutput at self_attn_v_proj: \ntorch.Size([1, 13, 1024]) \n----------------------------------------------------------------------------------------------------\nOutput at self_attn_o_proj: \ntorch.Size([1, 13, 4096]) \n----------------------------------------------------------------------------------------------------\nOutput at 
block_sparse_moe_gate: \ntorch.Size([13, 8]) \n----------------------------------------------------------------------------------------------------\n", + "output_type": "stream" + } + ], + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "\n", + "\n", + "### Explanation of Shapes:\n", + "\n", + "### 1. **embed_tokens**\n", + "- **Shape:** `torch.Size([1, 13, 4096])`\n", + "- **Explanation:**\n", + " - `1`: Batch size (number of sequences in this batch, here it's 1 sequence).\n", + " - `13`: Sequence length (the number of tokens in the input, here 13 tokens).\n", + " - `4096`: Embedding size (each token is mapped to a 4096-dimensional vector).\n", + " \n", + " **Shape Format:** `(batch_size, seq_len, embed_dim)`\n", + "\n", + "### 2. **self_attn_layer_1**\n", + "- **Shape (Output 0):** `torch.Size([1, 13, 4096])`\n", + "- **Explanation:**\n", + " - `1`: Batch size.\n", + " - `13`: Sequence length.\n", + " - `4096`: Hidden size (output of the attention mechanism).\n", + "\n", + " **Shape Format:** `(batch_size, seq_len, hidden_dim)`\n", + "\n", + "### 3. **block_sparse_moe_experts**\n", + "- **Shape:** `torch.Size([3, 4096])`\n", + "- **Explanation:**\n", + " - `3`: Number of activated experts for the MoE (Mixture of Experts) layer. In the `block_sparse_moe`, two experts are chosen per token (2 tokens in the batch may activate the same experts, hence the 3).\n", + " - `4096`: Expert embedding size (the dimensionality of the expert's output).\n", + " \n", + " **Shape Format:** `(num_experts, expert_embed_dim)`\n", + "\n", + "### 4. **post_attention_layernorm**\n", + "- **Shape:** `torch.Size([1, 13, 4096])`\n", + "- **Explanation:**\n", + " - `1`: Batch size.\n", + " - `13`: Sequence length.\n", + " - `4096`: Output dimension after the post-attention normalization step.\n", + " \n", + " **Shape Format:** `(batch_size, seq_len, hidden_dim)`\n", + "\n", + "### 5. **norm**\n", + "- **Shape:** `torch.Size([1, 13, 4096])`\n", + "- **Explanation:**\n", + " - `1`: Batch size.\n", + " - `13`: Sequence length.\n", + " - `4096`: Output dimension after applying the final normalization layer.\n", + " \n", + " **Shape Format:** `(batch_size, seq_len, hidden_dim)`\n", + "\n", + "### 6. **lm_head**\n", + "- **Shape:** `torch.Size([1, 13, 32000])`\n", + "- **Explanation:**\n", + " - `1`: Batch size.\n", + " - `13`: Sequence length.\n", + " - `32000`: Vocabulary size (logits over the vocabulary for each token in the sequence).\n", + " \n", + " **Shape Format:** `(batch_size, seq_len, vocab_size)`\n", + "\n", + "### 7. **input_layernorm**\n", + "- **Shape:** `torch.Size([1, 13, 4096])`\n", + "- **Explanation:**\n", + " - `1`: Batch size.\n", + " - `13`: Sequence length.\n", + " - `4096`: Output dimension after the input layer normalization step.\n", + " \n", + " **Shape Format:** `(batch_size, seq_len, hidden_dim)`\n", + "\n", + "### 8. **self_attn_q_proj**\n", + "- **Shape:** `torch.Size([1, 13, 4096])`\n", + "- **Explanation:**\n", + " - `1`: Batch size.\n", + " - `13`: Sequence length.\n", + " - `4096`: Query projection size (the hidden state is projected to the query vector space).\n", + " \n", + " **Shape Format:** `(batch_size, seq_len, hidden_dim)`\n", + "\n", + "### 9. 
**self_attn_k_proj**\n", + "- **Shape:** `torch.Size([1, 13, 1024])`\n", + "- **Explanation:**\n", + " - `1`: Batch size.\n", + " - `13`: Sequence length.\n", + " - `1024`: Key projection size (here, the key is projected to a smaller dimensional space compared to queries/values).\n", + " \n", + " **Shape Format:** `(batch_size, seq_len, key_dim)`\n", + "\n", + "### 10. **self_attn_v_proj**\n", + "- **Shape:** `torch.Size([1, 13, 1024])`\n", + "- **Explanation:**\n", + " - `1`: Batch size.\n", + " - `13`: Sequence length.\n", + " - `1024`: Value projection size (the values are also projected to the same size as the keys).\n", + " \n", + " **Shape Format:** `(batch_size, seq_len, value_dim)`\n", + "\n", + "### 11. **self_attn_o_proj**\n", + "- **Shape:** `torch.Size([1, 13, 4096])`\n", + "- **Explanation:**\n", + " - `1`: Batch size.\n", + " - `13`: Sequence length.\n", + " - `4096`: Output projection size (the final result after the attention mechanism is projected back to the original hidden dimension).\n", + " \n", + " **Shape Format:** `(batch_size, seq_len, hidden_dim)`\n", + "\n", + "### 12. **block_sparse_moe_gate**\n", + "- **Shape:** `torch.Size([13, 8])`\n", + "- **Explanation:**\n", + " - `13`: Sequence length (the gate operates per token).\n", + " - `8`: Number of experts (gating decisions are made over all available experts).\n", + " \n", + " **Shape Format:** `(seq_len, num_experts)`\n", + "\n", + "### Summary Table:\n", + "\n", + "| Layer Name | Shape Format | Dimensions | Notes |\n", + "|------------------------------|------------------------------------|------------|------------------------------------------------------------|\n", + "| `embed_tokens` | `(batch_size, seq_len, embed_dim)` | `[1, 13, 4096]` | Embedding tokens from vocabulary. |\n", + "| `self_attn_layer_1` | `(batch_size, seq_len, hidden_dim)`| `[1, 13, 4096]` | Output of first attention layer. |\n", + "| `block_sparse_moe_experts` | `(num_experts, expert_embed_dim)` | `[3, 4096]` | Expert outputs in MoE block. |\n", + "| `post_attention_layernorm` | `(batch_size, seq_len, hidden_dim)`| `[1, 13, 4096]` | Layer norm after attention. |\n", + "| `norm` | `(batch_size, seq_len, hidden_dim)`| `[1, 13, 4096]` | Final normalization layer. |\n", + "| `lm_head` | `(batch_size, seq_len, vocab_size)`| `[1, 13, 32000]` | Logits for each token over the vocabulary. |\n", + "| `input_layernorm` | `(batch_size, seq_len, hidden_dim)`| `[1, 13, 4096]` | Input layer normalization. |\n", + "| `self_attn_q_proj` | `(batch_size, seq_len, hidden_dim)`| `[1, 13, 4096]` | Query projection in self-attention. |\n", + "| `self_attn_k_proj` | `(batch_size, seq_len, key_dim)` | `[1, 13, 1024]` | Key projection in self-attention. |\n", + "| `self_attn_v_proj` | `(batch_size, seq_len, value_dim)` | `[1, 13, 1024]` | Value projection in self-attention. |\n", + "| `self_attn_o_proj` | `(batch_size, seq_len, hidden_dim)`| `[1, 13, 4096]` | Output projection after attention. |\n", + "| `block_sparse_moe_gate` | `(seq_len, num_experts)` | `[13, 8]` | Gating decisions for the mixture of experts. 
|\n", + "\n" + ], + "metadata": { + "id": "FyugDOzXAAa8" + } + } + ] +} \ No newline at end of file From 7019bf4a7fdc8a4fa53f26af222c4ac87096c7e4 Mon Sep 17 00:00:00 2001 From: Pritam Das <69068731+Pritam3355@users.noreply.github.com> Date: Sat, 19 Oct 2024 01:17:28 +0530 Subject: [PATCH 4/8] Delete llm_experiments directory --- llm_experiments/Mixtral_Experiment.ipynb | 965 ----------------------- llm_experiments/README.md | 89 --- 2 files changed, 1054 deletions(-) delete mode 100644 llm_experiments/Mixtral_Experiment.ipynb delete mode 100644 llm_experiments/README.md diff --git a/llm_experiments/Mixtral_Experiment.ipynb b/llm_experiments/Mixtral_Experiment.ipynb deleted file mode 100644 index 4903471af082..000000000000 --- a/llm_experiments/Mixtral_Experiment.ipynb +++ /dev/null @@ -1,965 +0,0 @@ -{ - "metadata": { - "kernelspec": { - "language": "python", - "display_name": "Python 3", - "name": "python3" - }, - "language_info": { - "name": "python", - "version": "3.10.14", - "mimetype": "text/x-python", - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "pygments_lexer": "ipython3", - "nbconvert_exporter": "python", - "file_extension": ".py" - }, - "kaggle": { - "accelerator": "nvidiaTeslaT4", - "dataSources": [], - "dockerImageVersionId": 30787, - "isInternetEnabled": true, - "language": "python", - "sourceType": "notebook", - "isGpuEnabled": true - }, - "colab": { - "name": "Mixtral-Experiment", - "provenance": [] - } - }, - "nbformat_minor": 0, - "nbformat": 4, - "cells": [ - { - "cell_type": "code", - "source": [ - "from huggingface_hub import login\n", - "\n", - "# Fetch Hugging Face username and token from Colab secrets\n", - "HF_USERNAME = \"pritam3355\"\n", - "HF_TOKEN = \"HF_TOKEN\"\n", - "\n", - "# Login to Hugging Face\n", - "try:\n", - " login(token=HF_TOKEN)\n", - "except ValueError:\n", - " # If token is not valid or found, login with username and token\n", - " # (likely requires manual authorization)\n", - " login(username=HF_USERNAME, token=HF_TOKEN)" - ], - "metadata": { - "_uuid": "8f2839f25d086af736a60e9eeb907d3b93b6e0e5", - "_cell_guid": "b1076dfc-b9ad-4769-8c92-a6c4dae69d19", - "trusted": true, - "execution": { - "iopub.status.busy": "2024-10-18T18:20:43.271188Z", - "iopub.execute_input": "2024-10-18T18:20:43.271841Z", - "iopub.status.idle": "2024-10-18T18:20:43.353056Z", - "shell.execute_reply.started": "2024-10-18T18:20:43.271801Z", - "shell.execute_reply": "2024-10-18T18:20:43.35218Z" - }, - "id": "H5JWFz2XAAak", - "outputId": "af45db86-89f6-4349-c2d9-15d969f3d3f2" - }, - "outputs": [ - { - "name": "stdout", - "text": "The token has not been saved to the git credentials helper. Pass `add_to_git_credential=True` in this function directly or `--add-to-git-credential` if using via `huggingface-cli` if you want to set the git credential as well.\nToken is valid (permission: fineGrained).\nYour token has been saved to /root/.cache/huggingface/token\nLogin successful\n", - "output_type": "stream" - } - ], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "Here Using Mixtral model to extract and analyze how the input sequence is processed in Forward pass. 
Mixtral is similar to Mistal model but has more parameters" - ], - "metadata": { - "id": "bLD_CkBUAQMy" - } - }, - { - "cell_type": "code", - "source": [ - "import torch\n", - "from transformers import AutoModelForCausalLM, AutoTokenizer\n", - "\n", - "model_id = \"mistralai/Mixtral-8x7B-v0.1\"\n", - "tokenizer = AutoTokenizer.from_pretrained(model_id,device=\"auto\")\n", - "\n", - "model = AutoModelForCausalLM.from_pretrained(model_id,trust_remote_code=True,\n", - " torch_dtype=torch.bfloat16,\n", - " low_cpu_mem_usage=True,device_map=\"auto\")\n" - ], - "metadata": { - "trusted": true, - "execution": { - "iopub.status.busy": "2024-10-18T18:20:43.354757Z", - "iopub.execute_input": "2024-10-18T18:20:43.355493Z", - "iopub.status.idle": "2024-10-18T18:30:40.651163Z", - "shell.execute_reply.started": "2024-10-18T18:20:43.355448Z", - "shell.execute_reply": "2024-10-18T18:30:40.650377Z" - }, - "id": "E3nm2iWcAAaq", - "outputId": "82d9a608-7eff-4578-b328-f9f773ed4f39", - "colab": { - "referenced_widgets": [ - "fa5c2b7f05bc412993098a3731e72989", - "e64a4b6de34d4f40b88305ce507e3658", - "4675ed906a964735b4334458935ab4b9", - "e4f24bafae8f4397b76818a34ca9d6e4", - "3621e97c28544d34ab3953c22d227cd0", - "dd02aa16c10b4ab78373aa3dae939489", - "44e75ecc95b74f03a7a58e6ea21165c1", - "6d26de44c0334077b6c14104747a48ad", - "57c7fa8051a94bcb96c0309651ab8298", - "b736720173fd4ba5bbe54cbcc1177423", - "368fe041fff84949ac30d3d45ac78a0d", - "79ff492b16e946c8a6238d31b181ffc8", - "2a12b5905b434c11beaaceaf7e1a6394", - "9f16b85fde7148b7931c30fb024c87d5", - "f0bae3fc9925442e82d58ecd7a305808", - "2181a83c39114bc78b1e4859b3ccdfed", - "14ad494e78084d8983bc6c0751f9d941", - "280600190e10484db98261256542f236", - "562e9f5c0d0d4228b218553019e483b6", - "cc6675e71cea4018b6adff29d60f0a82", - "39633f760e104265b1ddc2bcb3e4961d", - "64288ea1c3074a528339b9d0f9729d18", - "584114fa6b554a1495f6aa14011e0cc6", - "2756416bfbcf474c94c1ca2ab4b7d8e3", - "8c6e4f33682040feb42c1385c66b7ba2", - "68cc9722525c46328cf963c2a4f2740a", - "06367bbf0c094ba1bc7d481fb1bfc3f9", - "1434b26ed3b4449b8fd6a76e0f1e5c97" - ] - } - }, - "outputs": [ - { - "output_type": "display_data", - "data": { - "text/plain": "tokenizer_config.json: 0%| | 0.00/967 [00:00" - }, - "metadata": {} - } - ], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "## Forward Pass" - ], - "metadata": { - "id": "vORmhXXTAAa3" - } - }, - { - "cell_type": "code", - "source": [ - "input_text = \"The quick brown fox jumps over the lazy dog !\"\n", - "\n", - "# Tokenize the input text\n", - "inputs = tokenizer(input_text, return_tensors=\"pt\")\n", - "print(\"Tokenized inputs {'input_ids','attention_mask'} - \",inputs)\n", - "print(\"Decoded tokens : \",tokenizer.decode(inputs['input_ids'][0]))" - ], - "metadata": { - "trusted": true, - "execution": { - "iopub.status.busy": "2024-10-18T18:30:40.706383Z", - "iopub.execute_input": "2024-10-18T18:30:40.706895Z", - "iopub.status.idle": "2024-10-18T18:30:40.728093Z", - "shell.execute_reply.started": "2024-10-18T18:30:40.706863Z", - "shell.execute_reply": "2024-10-18T18:30:40.727243Z" - }, - "id": "dcC4RjNTAAa4", - "outputId": "f0d4c2e3-b7f6-471d-9b9b-ce5316c47431" - }, - "outputs": [ - { - "name": "stdout", - "text": "Tokenized inputs {'input_ids','attention_mask'} - {'input_ids': tensor([[ 1, 415, 2936, 9060, 285, 1142, 461, 10575, 754, 272,\n 17898, 3914, 918]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])}\nDecoded tokens : The quick brown fox jumps over the lazy dog !\n", - "output_type": "stream" - 
} - ], - "execution_count": null - }, - { - "cell_type": "code", - "source": [ - "\n", - "with torch.no_grad():\n", - " model_output = model(**inputs)" - ], - "metadata": { - "trusted": true, - "execution": { - "iopub.status.busy": "2024-10-18T18:30:40.729287Z", - "iopub.execute_input": "2024-10-18T18:30:40.729872Z", - "iopub.status.idle": "2024-10-18T18:36:43.660892Z", - "shell.execute_reply.started": "2024-10-18T18:30:40.72983Z", - "shell.execute_reply": "2024-10-18T18:36:43.660087Z" - }, - "id": "4x2A5-m-AAa6", - "outputId": "d0fc43d2-1229-4582-d4d3-6b5f745be24e" - }, - "outputs": [ - { - "name": "stderr", - "text": "Starting from v4.46, the `logits` model output will have the same type as the model (except at train time, where it will always be FP32)\n", - "output_type": "stream" - } - ], - "execution_count": null - }, - { - "cell_type": "code", - "source": [ - "for layer, output in outputs.items():\n", - " print(f\"Output at {layer}: \")\n", - " if isinstance(output, torch.Tensor):\n", - " print(output.shape, type(output))\n", - " elif isinstance(output, tuple):\n", - " for i, o in enumerate(output):\n", - " print(f\"Output {i}: {o.shape if isinstance(o, torch.Tensor) else type(o)}\")\n", - " else:\n", - " print(type(output))\n", - " print(\"-\" * 100)" - ], - "metadata": { - "trusted": true, - "execution": { - "iopub.status.busy": "2024-10-18T18:56:11.28238Z", - "iopub.execute_input": "2024-10-18T18:56:11.283252Z", - "iopub.status.idle": "2024-10-18T18:56:11.291437Z", - "shell.execute_reply.started": "2024-10-18T18:56:11.283214Z", - "shell.execute_reply": "2024-10-18T18:56:11.290478Z" - }, - "id": "xVuaYV3pAAa7", - "outputId": "84e2f36e-0f10-4be1-9fdd-581fe61fabb1" - }, - "outputs": [ - { - "name": "stdout", - "text": "Output at embed_tokens: \ntorch.Size([1, 13, 4096]) \n----------------------------------------------------------------------------------------------------\nOutput at self_attn_layer_1: \nOutput 0: torch.Size([1, 13, 4096])\nOutput 1: \nOutput 2: \n----------------------------------------------------------------------------------------------------\nOutput at block_sparse_moe_experts: \ntorch.Size([3, 4096]) \n----------------------------------------------------------------------------------------------------\nOutput at post_attention_layernorm: \ntorch.Size([1, 13, 4096]) \n----------------------------------------------------------------------------------------------------\nOutput at norm: \ntorch.Size([1, 13, 4096]) \n----------------------------------------------------------------------------------------------------\nOutput at lm_head: \ntorch.Size([1, 13, 32000]) \n----------------------------------------------------------------------------------------------------\nOutput at input_layernorm: \ntorch.Size([1, 13, 4096]) \n----------------------------------------------------------------------------------------------------\nOutput at self_attn_q_proj: \ntorch.Size([1, 13, 4096]) \n----------------------------------------------------------------------------------------------------\nOutput at self_attn_k_proj: \ntorch.Size([1, 13, 1024]) \n----------------------------------------------------------------------------------------------------\nOutput at self_attn_v_proj: \ntorch.Size([1, 13, 1024]) \n----------------------------------------------------------------------------------------------------\nOutput at self_attn_o_proj: \ntorch.Size([1, 13, 4096]) \n----------------------------------------------------------------------------------------------------\nOutput at 
block_sparse_moe_gate: \ntorch.Size([13, 8]) \n----------------------------------------------------------------------------------------------------\n", - "output_type": "stream" - } - ], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "\n", - "\n", - "### Explanation of Shapes:\n", - "\n", - "### 1. **embed_tokens**\n", - "- **Shape:** `torch.Size([1, 13, 4096])`\n", - "- **Explanation:**\n", - " - `1`: Batch size (number of sequences in this batch, here it's 1 sequence).\n", - " - `13`: Sequence length (the number of tokens in the input, here 13 tokens).\n", - " - `4096`: Embedding size (each token is mapped to a 4096-dimensional vector).\n", - " \n", - " **Shape Format:** `(batch_size, seq_len, embed_dim)`\n", - "\n", - "### 2. **self_attn_layer_1**\n", - "- **Shape (Output 0):** `torch.Size([1, 13, 4096])`\n", - "- **Explanation:**\n", - " - `1`: Batch size.\n", - " - `13`: Sequence length.\n", - " - `4096`: Hidden size (output of the attention mechanism).\n", - "\n", - " **Shape Format:** `(batch_size, seq_len, hidden_dim)`\n", - "\n", - "### 3. **block_sparse_moe_experts**\n", - "- **Shape:** `torch.Size([3, 4096])`\n", - "- **Explanation:**\n", - " - `3`: Number of activated experts for the MoE (Mixture of Experts) layer. In the `block_sparse_moe`, two experts are chosen per token (2 tokens in the batch may activate the same experts, hence the 3).\n", - " - `4096`: Expert embedding size (the dimensionality of the expert's output).\n", - " \n", - " **Shape Format:** `(num_experts, expert_embed_dim)`\n", - "\n", - "### 4. **post_attention_layernorm**\n", - "- **Shape:** `torch.Size([1, 13, 4096])`\n", - "- **Explanation:**\n", - " - `1`: Batch size.\n", - " - `13`: Sequence length.\n", - " - `4096`: Output dimension after the post-attention normalization step.\n", - " \n", - " **Shape Format:** `(batch_size, seq_len, hidden_dim)`\n", - "\n", - "### 5. **norm**\n", - "- **Shape:** `torch.Size([1, 13, 4096])`\n", - "- **Explanation:**\n", - " - `1`: Batch size.\n", - " - `13`: Sequence length.\n", - " - `4096`: Output dimension after applying the final normalization layer.\n", - " \n", - " **Shape Format:** `(batch_size, seq_len, hidden_dim)`\n", - "\n", - "### 6. **lm_head**\n", - "- **Shape:** `torch.Size([1, 13, 32000])`\n", - "- **Explanation:**\n", - " - `1`: Batch size.\n", - " - `13`: Sequence length.\n", - " - `32000`: Vocabulary size (logits over the vocabulary for each token in the sequence).\n", - " \n", - " **Shape Format:** `(batch_size, seq_len, vocab_size)`\n", - "\n", - "### 7. **input_layernorm**\n", - "- **Shape:** `torch.Size([1, 13, 4096])`\n", - "- **Explanation:**\n", - " - `1`: Batch size.\n", - " - `13`: Sequence length.\n", - " - `4096`: Output dimension after the input layer normalization step.\n", - " \n", - " **Shape Format:** `(batch_size, seq_len, hidden_dim)`\n", - "\n", - "### 8. **self_attn_q_proj**\n", - "- **Shape:** `torch.Size([1, 13, 4096])`\n", - "- **Explanation:**\n", - " - `1`: Batch size.\n", - " - `13`: Sequence length.\n", - " - `4096`: Query projection size (the hidden state is projected to the query vector space).\n", - " \n", - " **Shape Format:** `(batch_size, seq_len, hidden_dim)`\n", - "\n", - "### 9. 
**self_attn_k_proj**\n", - "- **Shape:** `torch.Size([1, 13, 1024])`\n", - "- **Explanation:**\n", - " - `1`: Batch size.\n", - " - `13`: Sequence length.\n", - " - `1024`: Key projection size (here, the key is projected to a smaller dimensional space compared to queries/values).\n", - " \n", - " **Shape Format:** `(batch_size, seq_len, key_dim)`\n", - "\n", - "### 10. **self_attn_v_proj**\n", - "- **Shape:** `torch.Size([1, 13, 1024])`\n", - "- **Explanation:**\n", - " - `1`: Batch size.\n", - " - `13`: Sequence length.\n", - " - `1024`: Value projection size (the values are also projected to the same size as the keys).\n", - " \n", - " **Shape Format:** `(batch_size, seq_len, value_dim)`\n", - "\n", - "### 11. **self_attn_o_proj**\n", - "- **Shape:** `torch.Size([1, 13, 4096])`\n", - "- **Explanation:**\n", - " - `1`: Batch size.\n", - " - `13`: Sequence length.\n", - " - `4096`: Output projection size (the final result after the attention mechanism is projected back to the original hidden dimension).\n", - " \n", - " **Shape Format:** `(batch_size, seq_len, hidden_dim)`\n", - "\n", - "### 12. **block_sparse_moe_gate**\n", - "- **Shape:** `torch.Size([13, 8])`\n", - "- **Explanation:**\n", - " - `13`: Sequence length (the gate operates per token).\n", - " - `8`: Number of experts (gating decisions are made over all available experts).\n", - " \n", - " **Shape Format:** `(seq_len, num_experts)`\n", - "\n", - "### Summary Table:\n", - "\n", - "| Layer Name | Shape Format | Dimensions | Notes |\n", - "|------------------------------|------------------------------------|------------|------------------------------------------------------------|\n", - "| `embed_tokens` | `(batch_size, seq_len, embed_dim)` | `[1, 13, 4096]` | Embedding tokens from vocabulary. |\n", - "| `self_attn_layer_1` | `(batch_size, seq_len, hidden_dim)`| `[1, 13, 4096]` | Output of first attention layer. |\n", - "| `block_sparse_moe_experts` | `(num_experts, expert_embed_dim)` | `[3, 4096]` | Expert outputs in MoE block. |\n", - "| `post_attention_layernorm` | `(batch_size, seq_len, hidden_dim)`| `[1, 13, 4096]` | Layer norm after attention. |\n", - "| `norm` | `(batch_size, seq_len, hidden_dim)`| `[1, 13, 4096]` | Final normalization layer. |\n", - "| `lm_head` | `(batch_size, seq_len, vocab_size)`| `[1, 13, 32000]` | Logits for each token over the vocabulary. |\n", - "| `input_layernorm` | `(batch_size, seq_len, hidden_dim)`| `[1, 13, 4096]` | Input layer normalization. |\n", - "| `self_attn_q_proj` | `(batch_size, seq_len, hidden_dim)`| `[1, 13, 4096]` | Query projection in self-attention. |\n", - "| `self_attn_k_proj` | `(batch_size, seq_len, key_dim)` | `[1, 13, 1024]` | Key projection in self-attention. |\n", - "| `self_attn_v_proj` | `(batch_size, seq_len, value_dim)` | `[1, 13, 1024]` | Value projection in self-attention. |\n", - "| `self_attn_o_proj` | `(batch_size, seq_len, hidden_dim)`| `[1, 13, 4096]` | Output projection after attention. |\n", - "| `block_sparse_moe_gate` | `(seq_len, num_experts)` | `[13, 8]` | Gating decisions for the mixture of experts. |\n", - "\n" - ], - "metadata": { - "id": "FyugDOzXAAa8" - } - } - ] -} \ No newline at end of file diff --git a/llm_experiments/README.md b/llm_experiments/README.md deleted file mode 100644 index 06283bf31a07..000000000000 --- a/llm_experiments/README.md +++ /dev/null @@ -1,89 +0,0 @@ - - - -# LLM-Experiment Series - -Welcome to the LL-Experiment series! 
This series of notebooks and scripts aims to provide a comprehensive guide on investigating the internal workings of Large Language Models (LLMs), understanding how they process inputs, and experimenting with their architectures. - -## Table of Contents - -- [Introduction](#introduction) -- [Series Overview](#series-overview) -- [Getting Started](#getting-started) -- [Notebooks and Scripts](#notebooks-and-scripts) -- [Contributing](#contributing) -- [License](#license) - -## Introduction - -Large Language Models (LLMs) have revolutionized the field of natural language processing (NLP) by achieving state-of-the-art performance on various tasks. However, understanding their internal workings and how they process inputs can be challenging. This series aims to demystify LLMs by providing detailed explanations, hands-on experiments, and practical tips for tweaking their architectures. - -## Series Overview - -The LLM-Experiment series will cover the following topics: - -1. **Understanding LLM Architectures**: - - An overview of popular open source LLM architectures like Whisper, Llama, and Mixtral. - - Key trouble shooting during experimentation - -2. **Investigating Input Processing**: - - How inputs are tokenized and embedded. - - The role of attention mechanisms in processing sequences. - - Visualizing and analyzing the outputs at various layers of the model. - -3. **Tweaking LLM Architectures**: - - Experimenting with different configurations and hyperparameters. - - Modifying existing LLM architectures to improve performance or adapt to specific tasks. - - Implementing custom layers and components. - -4. **Conducting New Experiments**: - - Designing and implementing new experiments to test hypotheses about LLM behavior. - - Evaluating the impact of architectural changes on model performance. - - Sharing insights and findings with the community. - -## Getting Started - -To get started with the LLM-Experiment series, you will need the following: - -1. **Python Environment**: - - All these notebooks are created in Kaggle or Google Colab, So it's recommended to use the same to reproduce the results for other models - - -2. **Hugging Face Account**: - - Create a Hugging Face account and obtain an API token. - - Login to Hugging Face using the provided token or username and token. - - Most of the Mistral,Llama models needs some sort of Agreement acceptance - -3. **Notebooks and Scripts**: - - Clone this repository to access the notebooks and scripts or you can directly open in Google Colab - - Follow the instructions in each notebook to run the experiments and analyze the results. - -## Notebooks and Scripts - -The series will include the following notebooks and scripts: - -1. **Mixtral Model Analysis**: - - Analyzing the architecture and configuration of the Mixtral model. - - Registering hooks to capture the outputs at various layers. - -2. **Input Processing and Embedding**: - Upcoming - - -3. **Attention Mechanisms and improvements**: - Upcoming - - -4. **Rolling Buffer,KV-cache,Sliding Window Attention**: - Upcoming - - -5. **Tweaking Model Architectures - Adapters,Down-Casting**: - Upcoming - - -## Contributing - -We welcome contributions from the community! If you have any ideas, suggestions, or improvements, please feel free to open an issue or submit a pull request. - -## License - -This project is licensed under the MIT License. See the [LICENSE](LICENSE) file for more details. 
-
-

From f3d43e869454a7ffbfdfdbe4d41b0635b85081eb Mon Sep 17 00:00:00 2001
From: Pritam Das <69068731+Pritam3355@users.noreply.github.com>
Date: Sat, 19 Oct 2024 10:11:04 +0530
Subject: [PATCH 5/8] Create README.md

---
 neural_network/chatbot/README.md | 55 ++++++++++++++++++++++++++++++++
 1 file changed, 55 insertions(+)
 create mode 100644 neural_network/chatbot/README.md

diff --git a/neural_network/chatbot/README.md b/neural_network/chatbot/README.md
new file mode 100644
index 000000000000..f7c216561074
--- /dev/null
+++ b/neural_network/chatbot/README.md
@@ -0,0 +1,55 @@
+# Chatbot with Chat History Stored in a Database
+
+This project is a simple chatbot application built using Python, integrating a database for chat history storage and a language model service to generate responses. The chatbot can handle user messages, manage chat history, and terminate conversations upon receiving a `/stop` command.
+
+## Features
+- **Conversation Handling**: The bot processes user inputs and generates responses using a language model service.
+- **Database Integration**: Stores chat data (user messages and bot responses) and maintains chat history.
+- **Session Management**: Supports starting and terminating chat sessions, including proper logging of start and end times.
+- **Message Truncation**: Limits conversation history to the last few messages if the conversation exceeds a large number of entries.
+
+## Components
+- **`Chatbot` Class**: Core logic for handling user messages and managing the chat lifecycle.
+- **`Database` (Mocked in tests)**: Handles chat data storage (methods for inserting and retrieving data).
+- **`LLM Service` (Mocked in tests)**: Generates responses to user input based on conversation history.
+
+## Installation
+1. Clone the repository.
+2. Install the necessary dependencies:
   ```bash
   pip3 install -r requirements.txt
   ```
+3. Run the bot or test it using `doctest`:
   ```bash
   python3 -m doctest -v chatbot.py
   ```

+## Usage
+1. **Create Database**: Create a database named `ChatDB` in MySQL.
+2. **Create `.env`**: Add a `.env` file with the following keys:
```
  # Together API key
  TOGETHER_API_KEY="YOUR_API_KEY"

  # Groq API key
  GROQ_API_KEY = "YOUR_API_KEY"

  # MySQL connectionDB (if you're running locally)
  DB_USER = ""
  DB_PASSWORD = ""
  DB_HOST = "127.0.0.1"
  DB_NAME = "ChatDB"
  PORT = "3306"
  ```
+3. **Handling Messages**: Run the command below to start the chat in the console; you can log in to your database to check the chat history:
```bash
python3 main.py
```
+4. 
**Ending the Chat**: When the user sends `/stop`, the chat will terminate and log the end of the conversation with the message 'conversation-terminated' + +## Testing +The code includes basic `doctests` to verify the chatbot's functionality using mock services for the database and language model: +- Run the tests: + ```bash + python3 -m doctest -v chatbot.py + ``` From 2dad12b898385d16e4e1a075d2613922ac62e0d5 Mon Sep 17 00:00:00 2001 From: Pritam Das <69068731+Pritam3355@users.noreply.github.com> Date: Sat, 19 Oct 2024 10:12:46 +0530 Subject: [PATCH 6/8] Add files via upload --- neural_network/chatbot/chatbot.py | 134 ++++++++++++++++ neural_network/chatbot/db.py | 199 ++++++++++++++++++++++++ neural_network/chatbot/llm_service.py | 78 ++++++++++ neural_network/chatbot/main.py | 44 ++++++ neural_network/chatbot/requirements.txt | 57 +++++++ 5 files changed, 512 insertions(+) create mode 100644 neural_network/chatbot/chatbot.py create mode 100644 neural_network/chatbot/db.py create mode 100644 neural_network/chatbot/llm_service.py create mode 100644 neural_network/chatbot/main.py create mode 100644 neural_network/chatbot/requirements.txt diff --git a/neural_network/chatbot/chatbot.py b/neural_network/chatbot/chatbot.py new file mode 100644 index 000000000000..38488349ff82 --- /dev/null +++ b/neural_network/chatbot/chatbot.py @@ -0,0 +1,134 @@ +import datetime +from typing import List, Dict, Any + + +class Chatbot: + """ + A Chatbot class to manage chat conversations using an LLM service and a database to store chat data. + + Methods: + - start_chat: Starts a new conversation, logs the start time. + - handle_user_message: Processes user input and stores user message & bot response in DB. + - end_chat: Ends the conversation and logs the end time. + - continue_chat: Retains only the last few messages if the conversation exceeds 1000 messages. + """ + + def __init__(self, db: Any, llm_service: Any) -> None: + """ + Initialize the Chatbot with a database and an LLM service. + + Parameters: + - db: The database instance used for storing chat data. + - llm_service: The language model service for generating responses. + """ + self.db = db + self.llm_service = llm_service + self.conversation_history: List[Dict[str, str]] = [] + self.chat_id_pk: int = None + + def start_chat(self) -> None: + """ + Start a new chat session and insert chat history to the database. + """ + start_time = datetime.datetime.now() + is_stream = 1 # Start new conversation + self.db.insert_chat_history(start_time, is_stream) + self.chat_id_pk = self.db.get_latest_chat_id() + + def handle_user_message(self, user_input: str) -> str: + """ + Handle user input and generate a bot response. + If the user sends '/stop', the conversation is terminated. + + Parameters: + - user_input: The input provided by the user. + + Returns: + - bot_response: The response generated by the bot. + + Raises: + - ValueError: If user input is not a string or if no chat_id is available. + + Doctest: + >>> class MockDatabase: + ... def __init__(self): + ... self.data = [] + ... def insert_chat_data(self, *args, **kwargs): + ... pass + ... def insert_chat_history(self, *args, **kwargs): + ... pass + ... def get_latest_chat_id(self): + ... return 1 + ... + >>> class MockLLM: + ... def generate_response(self, conversation_history): + ... if conversation_history[-1]["content"] == "/stop": + ... return "conversation-terminated" + ... 
return "Mock response" + >>> db_mock = MockDatabase() + >>> llm_mock = MockLLM() + >>> bot = Chatbot(db_mock, llm_mock) + >>> bot.start_chat() + >>> bot.handle_user_message("/stop") + 'conversation-terminated' + >>> bot.handle_user_message("Hello!") + 'Mock response' + """ + if not isinstance(user_input, str): + raise ValueError("User input must be a string.") + + if self.chat_id_pk is None: + raise ValueError("Chat has not been started. Call start_chat() first.") + + self.conversation_history.append({"role": "user", "content": user_input}) + + if user_input == "/stop": + self.end_chat() + return "conversation-terminated" + else: + bot_response = self.llm_service.generate_response(self.conversation_history) + print(f"Bot : ",bot_response) + self.conversation_history.append( + {"role": "assistant", "content": bot_response} + ) + self._store_message_in_db(user_input, bot_response) + + return bot_response + + def _store_message_in_db(self, user_input: str, bot_response: str) -> None: + """ + Store user input and bot response in the database. + + Parameters: + - user_input: The message from the user. + - bot_response: The response generated by the bot. + + Raises: + - ValueError: If insertion into the database fails. + """ + try: + self.db.insert_chat_data(self.chat_id_pk, user_input, bot_response) + except Exception as e: + raise ValueError(f"Failed to insert chat data: {e}") + + def end_chat(self) -> None: + """ + End the chat session and update the chat history in the database. + """ + current_time = datetime.datetime.now() + is_stream = 2 # End of conversation + try: + user_input = "/stop" + bot_response = "conversation-terminated" + print(f"Bot : ",bot_response) + self.db.insert_chat_data(self.chat_id_pk, user_input, bot_response) + self.db.insert_chat_history(current_time, is_stream) + except Exception as e: + raise ValueError(f"Failed to update chat history: {e}") + + def continue_chat(self) -> None: + """ + Retain only the last few entries if the conversation exceeds 1000 messages. + """ + if len(self.conversation_history) > 1000: + self.conversation_history = self.conversation_history[-3:] diff --git a/neural_network/chatbot/db.py b/neural_network/chatbot/db.py new file mode 100644 index 000000000000..92ef6909cabe --- /dev/null +++ b/neural_network/chatbot/db.py @@ -0,0 +1,199 @@ +import os +from dotenv import load_dotenv +import mysql.connector +from mysql.connector import MySQLConnection + +load_dotenv() + + +class Database: + """ + A class to manage the connection to the MySQL database using configuration from environment variables. + + Attributes: + ----------- + config : dict + The database connection parameters like user, password, host, and database name. + """ + + def __init__(self) -> None: + self.config = { + "user": os.environ.get("DB_USER"), + "password": os.environ.get("DB_PASSWORD"), + "host": os.environ.get("DB_HOST"), + "database": os.environ.get("DB_NAME"), + } + + def connect(self) -> MySQLConnection: + """ + Establish a connection to the MySQL database. + + Returns: + -------- + MySQLConnection + A connection object for interacting with the MySQL database. + + Raises: + ------- + mysql.connector.Error + If the connection to the database fails. + """ + return mysql.connector.connect(**self.config) + + +class ChatDatabase: + """ + A class to manage chat-related database operations, such as creating tables, + inserting chat history, and retrieving chat data. 
+
+    Attributes:
+    -----------
+    db : Database
+        An instance of the `Database` class for establishing connections to the MySQL database.
+    """
+
+    def __init__(self, db: Database) -> None:
+        self.db = db
+
+    def create_tables(self) -> None:
+        """
+        Create the necessary tables for chat history and chat data in the database.
+        If the tables already exist, they will not be created again.
+
+        Raises:
+        -------
+        mysql.connector.Error
+            If there is any error executing the SQL statements.
+        """
+        conn = self.db.connect()
+        cursor = conn.cursor()
+
+        cursor.execute(
+            """
+            CREATE TABLE IF NOT EXISTS ChatDB.Chat_history (
+                chat_id INT AUTO_INCREMENT PRIMARY KEY,
+                start_time DATETIME,
+                is_stream INT
+            )
+            """
+        )
+
+        cursor.execute(
+            """
+            CREATE TABLE IF NOT EXISTS ChatDB.Chat_data (
+                id INT AUTO_INCREMENT PRIMARY KEY,
+                chat_id INT,
+                user TEXT,
+                assistant TEXT,
+                is_stream INT,  -- populated by the update_is_stream trigger below
+                FOREIGN KEY (chat_id) REFERENCES ChatDB.Chat_history(chat_id)
+            )
+            """
+        )
+
+        cursor.execute("DROP TRIGGER IF EXISTS update_is_stream")
+
+        cursor.execute(
+            """
+            CREATE TRIGGER update_is_stream
+            AFTER UPDATE ON ChatDB.Chat_history
+            FOR EACH ROW
+            BEGIN
+                UPDATE ChatDB.Chat_data
+                SET is_stream = NEW.is_stream
+                WHERE chat_id = NEW.chat_id;
+            END;
+            """
+        )
+
+        conn.commit()
+        cursor.close()
+        conn.close()
+
+    def insert_chat_history(self, start_time: str, is_stream: int) -> None:
+        """
+        Insert a new chat history record into the database.
+
+        Parameters:
+        -----------
+        start_time : str
+            The starting time of the chat session.
+        is_stream : int
+            An integer indicating whether the chat is in progress (1) or ended (2).
+
+        Raises:
+        -------
+        mysql.connector.Error
+            If there is any error executing the SQL statements.
+        """
+        conn = self.db.connect()
+        cursor = conn.cursor()
+        cursor.execute(
+            """
+            INSERT INTO ChatDB.Chat_history (start_time, is_stream)
+            VALUES (%s, %s)
+            """,
+            (start_time, is_stream),
+        )
+        conn.commit()
+        cursor.close()
+        conn.close()
+
+    def get_latest_chat_id(self) -> int:
+        """
+        Retrieve the chat ID of the most recent chat session from the database.
+
+        Returns:
+        --------
+        int
+            The ID of the latest chat session.
+
+        Raises:
+        -------
+        mysql.connector.Error
+            If there is any error executing the SQL statements.
+        """
+        conn = self.db.connect()
+        cursor = conn.cursor()
+        cursor.execute(
+            """
+            SELECT chat_id FROM ChatDB.Chat_history WHERE
+            chat_id=(SELECT MAX(chat_id) FROM ChatDB.Chat_history)
+            """
+        )
+        chat_id_pk = cursor.fetchone()[0]
+        cursor.close()
+        conn.close()
+        return chat_id_pk
+
+    def insert_chat_data(
+        self, chat_id: int, user_message: str, assistant_message: str
+    ) -> None:
+        """
+        Insert a new chat data record into the database.
+
+        Parameters:
+        -----------
+        chat_id : int
+            The ID of the chat session to which this data belongs.
+        user_message : str
+            The message provided by the user in the chat session.
+        assistant_message : str
+            The response from the assistant in the chat session.
+
+        Raises:
+        -------
+        mysql.connector.Error
+            If there is any error executing the SQL statements.
+ """ + conn = self.db.connect() + cursor = conn.cursor() + cursor.execute( + """ + INSERT INTO ChatDB.Chat_data (chat_id, user, assistant) + VALUES (%s, %s, %s) + """, + (chat_id, user_message, assistant_message), + ) + conn.commit() + cursor.close() + conn.close() diff --git a/neural_network/chatbot/llm_service.py b/neural_network/chatbot/llm_service.py new file mode 100644 index 000000000000..f1203f642332 --- /dev/null +++ b/neural_network/chatbot/llm_service.py @@ -0,0 +1,78 @@ +import os +from together import Together +from groq import Groq +from dotenv import load_dotenv +from typing import List, Dict + +load_dotenv() + + +class LLMService: + """ + A class to interact with different LLM (Large Language Model) API services, such as Together and Groq. + + Attributes: + ----------- + api_service : str + The name of the API service to use ("Together" or "Groq"). + """ + + def __init__(self, api_service: str) -> None: + """ + Initialize the LLMService with a specific API service. + + Parameters: + ----------- + api_service : str + The name of the LLM API service, either "Together" or "Groq". + """ + self.api_service = api_service + + def generate_response(self, conversation_history: List[Dict[str, str]]) -> str: + """ + Generate a response from the specified LLM API based on the conversation history. + + Parameters: + ----------- + conversation_history : List[Dict[str, str]] + The list of conversation messages, where each message is a dictionary with 'role' and 'content' keys. + + Returns: + -------- + str + The generated response content from the assistant. + + Raises: + ------- + ValueError + If the specified API service is neither "Together" nor "Groq". + """ + if self.api_service == "Together": + client = Together(api_key=os.environ.get("TOGETHER_API_KEY")) + response = client.chat.completions.create( + model="meta-llama/Llama-3.2-3B-Instruct-Turbo", + messages=conversation_history, + max_tokens=512, + temperature=0.3, + top_p=0.7, + top_k=50, + repetition_penalty=1, + stop=["<|eot_id|>", "<|eom_id|>"], + stream=False, + ) + elif self.api_service == "Groq": + client = Groq(api_key=os.environ.get("GROQ_API_KEY")) + response = client.chat.completions.create( + model="llama3-8b-8192", + messages=conversation_history, + max_tokens=1024, + temperature=0.3, + top_p=0.7, + stop=["<|eot_id|>", "<|eom_id|>"], + stream=False, + ) + else: + raise ValueError(f"Unsupported API service: {self.api_service}") + + # Extracting the content of the generated response + return response.choices[0].message.content diff --git a/neural_network/chatbot/main.py b/neural_network/chatbot/main.py new file mode 100644 index 000000000000..cdbd631c7783 --- /dev/null +++ b/neural_network/chatbot/main.py @@ -0,0 +1,44 @@ +from db import Database, ChatDatabase +from llm_service import LLMService +from chatbot import Chatbot +from typing import NoReturn + + +def main() -> NoReturn: + """ + Main function to initialize and start the chatbot application. + + This function initializes the database and LLM service, creates necessary tables, and starts + the chatbot for user interaction. 
+    """
+    # Initialize and configure the database
+    db = Database()
+    chat_db = ChatDatabase(db)
+    chat_db.create_tables()
+
+    # Set the API service to either "Together" or "Groq"
+    api_service = (
+        "Groq"  # Can be set dynamically based on user preference or environment
+    )
+    llm_service = LLMService(api_service)
+
+    # Initialize the Chatbot with the database and LLM service
+    chatbot = Chatbot(chat_db, llm_service)
+
+    print("Welcome to the chatbot! Type '/stop' to end the conversation.")
+    chatbot.start_chat()
+
+    # Chat loop to handle user input
+    while True:
+        user_input = input("\nYou: ")
+        if user_input.strip().lower() == "/stop":
+            chatbot.end_chat()  # End the conversation if user types "/stop"
+            break
+        chatbot.handle_user_message(
+            user_input
+        )  # Process user input and generate response
+        chatbot.continue_chat()  # Handle long conversations (trim history if necessary)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/neural_network/chatbot/requirements.txt b/neural_network/chatbot/requirements.txt
new file mode 100644
index 000000000000..0f1204243a5d
--- /dev/null
+++ b/neural_network/chatbot/requirements.txt
@@ -0,0 +1,57 @@
+aiohappyeyeballs==2.4.2
+aiohttp==3.10.8
+aiosignal==1.3.1
+annotated-types==0.7.0
+anyio==4.6.0
+asgiref==3.8.1
+attrs==24.2.0
+black==24.10.0
+certifi==2024.8.30
+cfgv==3.4.0
+charset-normalizer==3.3.2
+click==8.1.7
+distlib==0.3.9
+distro==1.9.0
+Django==5.1.1
+djangorestframework==3.15.2
+eval_type_backport==0.2.0
+filelock==3.16.1
+frozenlist==1.4.1
+groq==0.11.0
+h11==0.14.0
+httpcore==1.0.5
+httpx==0.27.2
+identify==2.6.1
+idna==3.10
+markdown-it-py==3.0.0
+mdurl==0.1.2
+multidict==6.1.0
+mypy-extensions==1.0.0
+mysql-connector-python==9.0.0
+nodeenv==1.9.1
+numpy==2.1.1
+packaging==24.1
+pathspec==0.12.1
+pillow==10.4.0
+platformdirs==4.3.6
+pre_commit==4.0.1
+pyarrow==17.0.0
+pydantic==2.9.2
+pydantic_core==2.23.4
+Pygments==2.18.0
+python-dotenv==1.0.1
+PyYAML==6.0.2
+requests==2.32.3
+rich==13.8.1
+ruff==0.7.0
+shellingham==1.5.4
+sniffio==1.3.1
+sqlparse==0.5.1
+tabulate==0.9.0
+together==1.3.0
+tqdm==4.66.5
+typer==0.12.5
+typing_extensions==4.12.2
+urllib3==2.2.3
+virtualenv==20.27.0
+yarl==1.13.1
From 4ecdca19d673d721c5c744a238a437e5b3c72146 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Sat, 19 Oct 2024 04:49:54 +0000
Subject: [PATCH 7/8] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 neural_network/chatbot/README.md  | 4 ++--
 neural_network/chatbot/chatbot.py | 4 ++--
 neural_network/chatbot/db.py      | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/neural_network/chatbot/README.md b/neural_network/chatbot/README.md
index f7c216561074..5e8f86b0c6fd 100644
--- a/neural_network/chatbot/README.md
+++ b/neural_network/chatbot/README.md
@@ -30,10 +30,10 @@ This project is a simple chatbot application built using Python, integrating a d
    ```
    # Together API key
    TOGETHER_API_KEY="YOUR_API_KEY"
-   
+
    # Groq API key
    GROQ_API_KEY = "YOUR_API_KEY"
-   
+
    # MySQL connectionDB (if you're running locally)
    DB_USER = ""
    DB_PASSWORD = ""
diff --git a/neural_network/chatbot/chatbot.py b/neural_network/chatbot/chatbot.py
index 38488349ff82..2456ced5ee35 100644
--- a/neural_network/chatbot/chatbot.py
+++ b/neural_network/chatbot/chatbot.py
@@ -87,7 +87,7 @@ def handle_user_message(self, user_input: str) -> str:
             return "conversation-terminated"
         else:
             bot_response = self.llm_service.generate_response(self.conversation_history)
-            print(f"Bot : ",bot_response)
+            print(f"Bot : ", bot_response)
             self.conversation_history.append(
                 {"role": "assistant", "content": bot_response}
             )
@@ -120,7 +120,7 @@ def end_chat(self) -> None:
         try:
             user_input = "/stop"
             bot_response = "conversation-terminated"
-            print(f"Bot : ",bot_response)
+            print(f"Bot : ", bot_response)
             self.db.insert_chat_data(self.chat_id_pk, user_input, bot_response)
             self.db.insert_chat_history(current_time, is_stream)
         except Exception as e:
diff --git a/neural_network/chatbot/db.py b/neural_network/chatbot/db.py
index 92ef6909cabe..3572a699ea57 100644
--- a/neural_network/chatbot/db.py
+++ b/neural_network/chatbot/db.py
@@ -159,7 +159,7 @@ def get_latest_chat_id(self) -> int:
         cursor = conn.cursor()
         cursor.execute(
             """
-            SELECT chat_id FROM ChatDB.Chat_history WHERE 
+            SELECT chat_id FROM ChatDB.Chat_history WHERE
             chat_id=(SELECT MAX(chat_id) FROM ChatDB.Chat_history)
             """
         )
From f8510d702eea331a94663769e84bd6959d25537b Mon Sep 17 00:00:00 2001
From: Pritam Das <69068731+Pritam3355@users.noreply.github.com>
Date: Sat, 19 Oct 2024 10:31:17 +0530
Subject: [PATCH 8/8] Add files via upload

---
 neural_network/chatbot/chatbot.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/neural_network/chatbot/chatbot.py b/neural_network/chatbot/chatbot.py
index 2456ced5ee35..9de733c94e21 100644
--- a/neural_network/chatbot/chatbot.py
+++ b/neural_network/chatbot/chatbot.py
@@ -83,11 +83,13 @@ def handle_user_message(self, user_input: str) -> str:
         self.conversation_history.append({"role": "user", "content": user_input})
 
         if user_input == "/stop":
+            bot_response = "conversation-terminated"
+            # print(f"Bot: {bot_response}")
             self.end_chat()
-            return "conversation-terminated"
+            return bot_response
         else:
             bot_response = self.llm_service.generate_response(self.conversation_history)
-            print(f"Bot : ", bot_response)
+            # print(f"Bot: {bot_response}")
             self.conversation_history.append(
                 {"role": "assistant", "content": bot_response}
             )
@@ -120,7 +122,6 @@ def end_chat(self) -> None:
         try:
             user_input = "/stop"
             bot_response = "conversation-terminated"
-            print(f"Bot : ", bot_response)
             self.db.insert_chat_data(self.chat_id_pk, user_input, bot_response)
             self.db.insert_chat_history(current_time, is_stream)
         except Exception as e: