diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index 58b0fc0985..0000000000 --- a/.dockerignore +++ /dev/null @@ -1,60 +0,0 @@ -# See https://help.github.com/ignore-files/ for more about ignoring files. - -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# Distribution / packaging -dist/ -build/ -*.egg-info/ -*.egg - -# Virtual environments -.env -.env.sh -venv/ -ENV/ - -# IDE-specific files -.vscode/ -.idea/ - -# Compiled Python modules -*.pyc -*.pyo -*.pyd - -# Python testing -.pytest_cache/ -.ruff_cache/ -.coverage -.mypy_cache/ - -# macOS specific files -.DS_Store - -# Windows specific files -Thumbs.db - -# this application's specific files -archive - -# any log file -*log.txt -todo -scratchpad - -# Ignore GPT Engineer files -projects -!projects/example - -# Pyenv -.python-version - -# Benchmark files -benchmark -!benchmark/*/prompt - -.gpte_consent diff --git a/.env.example b/.env.example new file mode 100644 index 0000000000..514b9dd0a0 --- /dev/null +++ b/.env.example @@ -0,0 +1,37 @@ +# https://console.groq.com/keys +GROQ_API_KEY=APIKEYGOESHERE +# https://platform.openai.com/account/api-keys +OPENAI_API_KEY=APIKEYGOESHERE +# https://serper.dev/ +SERPER_API=APIKEYGOESHERE +# Brave Search API Key (Serper is the default, brave is an alternative option for search) +BRAVE_SEARCH_API_KEY=APIKEYGOESHERE + + +# OPTIONAL - Set LAN GPU server, examples: +# PC | http://localhost:11434/v1 +# LAN GPU server | http://192.168.1.100:11434/v1 +OLLAMA_BASE_URL=http://localhost:11434/v1 + +# OPTIONAL - Rate Limiting: https://console.upstash.com/redis +UPSTASH_REDIS_REST_URL=https://EXAMPLE.upstash.io +UPSTASH_REDIS_REST_TOKEN=APIKEYGOESHERE + +# OPTIONAL - Google Search API Key (Serper is the default, brave is an alternative) +GOOGLE_SEARCH_API_KEY=APIKEYGOESHERE +GOOGLE_CX=VALUEGOESHERE + +# OPTIONAL - Portkey API Key & Bedrock Virtual Key/Provder API Keys +PORTKEY_API_KEY=APIKEYGOESHERE 
+PORTKEY_BEDROCK_VIRTUAL_KEY=APIKEYGOESHERE + +# OPTIONAL - Spotify +SPOTIFY_CLIENT_ID=APIKEYGOESHERE +SPOTIFY_CLIENT_SECRET=APIKEYGOESHERE + +# OPTIONAL - AWS Bedrock +AWS_ACCESS_KEY_ID=APIKEYGOESHERE +AWS_SECRET_ACCESS_KEY=APIKEYGOESHERE + +# OPTIONAL - FAL.AI (Stable Diffusion 3) +FAL_KEY=APIKEYGOESHERE \ No newline at end of file diff --git a/.env.template b/.env.template deleted file mode 100644 index ffdc5bb6e2..0000000000 --- a/.env.template +++ /dev/null @@ -1,5 +0,0 @@ -### OpenAI Setup ### - -# OPENAI_API_KEY=Your personal OpenAI API key from https://platform.openai.com/account/api-keys -OPENAI_API_KEY=... -ANTHROPIC_API_KEY=... diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS deleted file mode 100644 index 55dfea91d9..0000000000 --- a/.github/CODEOWNERS +++ /dev/null @@ -1 +0,0 @@ -.github/workflows/ @ATheorell diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md deleted file mode 100644 index 7713a6b3df..0000000000 --- a/.github/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,131 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -We as members, contributors, and leaders pledge to make participation in our -community a harassment-free experience for everyone, regardless of age, body -size, visible or invisible disability, ethnicity, sex characteristics, gender -identity or expression, level of experience, education, socio-economic status, -nationality, personal appearance, race, caste, color, religion, or sexual -identity and orientation. - -We pledge to act and interact in ways that contribute to an open, welcoming, -diverse, inclusive, and healthy community. 
- -## Our Standards - -Examples of behavior that contributes to a positive environment for our -community include: - -* Demonstrating empathy and kindness toward other people -* Being respectful of differing opinions, viewpoints, and experiences -* Giving and gracefully accepting constructive feedback -* Accepting responsibility and apologizing to those affected by our mistakes, - and learning from the experience -* Focusing on what is best not just for us as individuals, but for the overall - community - -Examples of unacceptable behavior include: - -* The use of sexualized language or imagery, and sexual attention or advances of - any kind -* Trolling, insulting or derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or email address, - without their explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Enforcement Responsibilities - -Community leaders are responsible for clarifying and enforcing our standards of -acceptable behavior and will take appropriate and fair corrective action in -response to any behavior that they deem inappropriate, threatening, offensive, -or harmful. - -Community leaders have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions that are -not aligned to this Code of Conduct, and will communicate reasons for moderation -decisions when appropriate. - -## Scope - -This Code of Conduct applies within all community spaces, and also applies when -an individual is officially representing the community in public spaces. -Examples of representing our community include using an official e-mail address, -posting using an official social media account, or acting as an appointed -representative at an online or offline event. 
- -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported to the community leaders responsible for enforcement at -. -All complaints will be reviewed and investigated promptly and fairly. - -All community leaders are obligated to respect the privacy and security of reporters of incidents. - -## Enforcement Guidelines - -Community leaders will follow these Community Impact Guidelines in determining -the consequences for any action they deem in violation of this Code of Conduct: - -### 1. Correction - -**Community Impact**: Use of inappropriate language or other behavior deemed -unprofessional or unwelcome in the community. - -**Consequence**: A private, written warning from community leaders, providing -clarity around the nature of the violation and an explanation of why the -behavior was inappropriate. A public apology may be requested. - -### 2. Warning - -**Community Impact**: A violation through a single incident or series of -actions. - -**Consequence**: A warning with consequences for continued behavior. No -interaction with the people involved, including unsolicited interaction with -those enforcing the Code of Conduct, for a specified period of time. This -includes avoiding interactions in community spaces as well as external channels -like social media. Violating these terms may lead to a temporary or permanent -ban. - -### 3. Temporary Ban - -**Community Impact**: A serious violation of community standards, including -sustained inappropriate behavior. - -**Consequence**: A temporary ban from any sort of interaction or public -communication with the community for a specified period of time. No public or -private interaction with the people involved, including unsolicited interaction -with those enforcing the Code of Conduct, is allowed during this period. -Violating these terms may lead to a permanent ban. - -### 4. 
Permanent Ban - -**Community Impact**: Demonstrating a pattern of violation of community -standards, including sustained inappropriate behavior, harassment of an -individual, or aggression toward or disparagement of classes of individuals. - -**Consequence**: A permanent ban from any sort of public interaction within the -community. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 2.1, available at -[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. - -Community Impact Guidelines were inspired by -[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. - -For answers to common questions about this code of conduct, see the FAQ at -[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at -[https://www.contributor-covenant.org/translations][translations]. - -[homepage]: https://www.contributor-covenant.org -[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html -[Mozilla CoC]: https://github.com/mozilla/diversity -[FAQ]: https://www.contributor-covenant.org/faq -[translations]: https://www.contributor-covenant.org/translations diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md deleted file mode 100644 index dbc4b4dec1..0000000000 --- a/.github/CONTRIBUTING.md +++ /dev/null @@ -1,135 +0,0 @@ -# Contributing to gpt-engineer - -The gpt-engineer is a community project and lives from your contributions - they are warmly appreciated. The main contribution avenues are: -- Pull request: implement code and have it reviewed and potentially merged by the maintainers. Implementations of existing feature requests or fixes to bug reports are likely to be merged. -- Bug report: report when something in gpt-engineer doesn't work. Do not report errors in programs written _by_ gpt-engineer. -- Feature request: provide a detailed sketch about something you want to have implemented in gpt-engineer. 
There is no guarantee that features will be implemented. -- Discussion: raise awareness of a potential improvement. This is often a good starting point before making a detailed feature request. - -By participating in this project, you agree to abide by the [code of conduct](https://github.com/gpt-engineer-org/gpt-engineer/blob/main/.github/CODE_OF_CONDUCT.md). - -## Merge Policy for Pull Requests -Code that is likely to introduce breaking changes, or significantly change the user experience for users and developers, require [board approval](https://github.com/gpt-engineer-org/gpt-engineer/blob/main/GOVERNANCE.md) to be merged. Smaller code changes can be merged directly. -As a rule, cosmetic pull requests, for example rephrasing the readme or introducing more compact syntax, that do not yield clear practical improvements are not merged. Such pull requests are generally discouraged, both to save time for the maintainers and to establish a lower bar for becoming a contributor. - -## Getting Started with Pull Requests to gpt-engineer - -To get started with contributing, please follow these steps: - -1. Fork the repository and clone it to your local machine. -2. Install any necessary dependencies. -3. Create a new branch for your changes: `git checkout -b my-branch-name`. -4. Make your desired changes or additions. -5. Run the tests to ensure everything is working as expected. -6. Commit your changes: `git commit -m "Descriptive commit message"`. -7. Push to the branch: `git push origin my-branch-name`. -8. Submit a pull request to the `main` branch of the original repository. - -## Code Style - -Please make sure to follow the established code style guidelines for this project. Consistent code style helps maintain readability and makes it easier for others to contribute to the project. 
- -To enforce this we use [`pre-commit`](https://pre-commit.com/) to run [`black`](https://black.readthedocs.io/en/stable/index.html) and [`ruff`](https://beta.ruff.rs/docs/) on every commit. - -To install gpt-engineer as a developer, clone the repository and install the dependencies with: - -```bash -$ poetry install -$ poetry shell -``` - -And then install the `pre-commit` hooks with: - -```bash -$ pre-commit install - -# output: -pre-commit installed at .git/hooks/pre-commit -``` - -If you are not familiar with the concept of [git hooks](https://git-scm.com/docs/githooks) and/or [`pre-commit`](https://pre-commit.com/) please read the documentation to understand how they work. - -As an introduction of the actual workflow, here is an example of the process you will encounter when you make a commit: - -Let's add a file we have modified with some errors, see how the pre-commit hooks run `black` and fails. -`black` is set to automatically fix the issues it finds: - -```bash -$ git add chat_to_files.py -$ git commit -m "commit message" -black....................................................................Failed -- hook id: black -- files were modified by this hook - -reformatted chat_to_files.py - -All done! ✨ 🍰 ✨ -1 file reformatted. -``` - -You can see that `chat_to_files.py` is both staged and not staged for commit. This is because `black` has formatted it and now it is different from the version you have in your working directory. To fix this you can simply run `git add chat_to_files.py` again and now you can commit your changes. - -```bash -$ git status -On branch pre-commit-setup -Changes to be committed: - (use "git restore --staged ..." to unstage) - modified: chat_to_files.py - -Changes not staged for commit: - (use "git add ..." to update what will be committed) - (use "git restore ..." to discard changes in working directory) - modified: chat_to_files.py -``` - -Now let's add the file again to include the latest commits and see how `ruff` fails. 
- -```bash -$ git add chat_to_files.py -$ git commit -m "commit message" -black....................................................................Passed -ruff.....................................................................Failed -- hook id: ruff -- exit code: 1 -- files were modified by this hook - -Found 2 errors (2 fixed, 0 remaining). -``` - -Same as before, you can see that `chat_to_files.py` is both staged and not staged for commit. This is because `ruff` has formatted it and now it is different from the version you have in your working directory. To fix this you can simply run `git add chat_to_files.py` again and now you can commit your changes. - -```bash -$ git add chat_to_files.py -$ git commit -m "commit message" -black....................................................................Passed -ruff.....................................................................Passed -fix end of files.........................................................Passed -[pre-commit-setup f00c0ce] testing - 1 file changed, 1 insertion(+), 1 deletion(-) -``` - -Now your file has been committed and you can push your changes. - -At the beginning this might seem like a tedious process (having to add the file again after `black` and `ruff` have modified it) but it is actually very useful. It allows you to see what changes `black` and `ruff` have made to your files and make sure that they are correct before you commit them. 
- -### Important Note When `pre-commit` Fails in the Build Pipeline -Sometimes `pre-commit` will seemingly run successfully, as follows: - -```bash -black................................................(no files to check)Skipped -ruff.................................................(no files to check)Skipped -check toml...........................................(no files to check)Skipped -check yaml...........................................(no files to check)Skipped -detect private key...................................(no files to check)Skipped -fix end of files.....................................(no files to check)Skipped -trim trailing whitespace.............................(no files to check)Skipped -``` - -However, you may see `pre-commit` fail in the build pipeline upon submitting a PR. The solution to this is to run `pre-commit run --all-files` to force `pre-commit` to execute these checks, and make any necessary file modifications, to all files. - - -## Licensing - -By contributing to gpt-engineer, you agree that your contributions will be licensed under the [LICENSE](https://github.com/gpt-engineer-org/gpt-engineer/blob/main/LICENSE) file of the project. - -Thank you for your interest in contributing to gpt-engineer! We appreciate your support and look forward to your contributions. 
diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml index e7deadaff8..a0587697a2 100644 --- a/.github/FUNDING.yml +++ b/.github/FUNDING.yml @@ -1,4 +1 @@ -# These are supported funding model platforms - -github: [antonosika] -patreon: gpt_eng +github: [developersdigest] diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md deleted file mode 100644 index 99d451c3a7..0000000000 --- a/.github/ISSUE_TEMPLATE/bug-report.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -name: Bug report -about: Create a report to help us improve -title: '' -labels: bug, triage -assignees: '' - ---- - -## Policy and info - - Maintainers will close issues that have been stale for 14 days if they contain relevant answers. - - Adding the label "sweep" will automatically turn the issue into a coded pull request. Works best for mechanical tasks. More info/syntax at: https://docs.sweep.dev/ - -## Expected Behavior - -Please describe the behavior you are expecting. - -## Current Behavior - -What is the current behavior? - -## Failure Information - -Information about the failure, including environment details, such as LLM used. - -### Failure Logs - -If your project includes a debug_log_file.txt, kindly upload it from your_project/.gpteng/memory/ directory. This file encompasses all the necessary logs. Should the file prove extensive, consider utilizing GitHub's "add files" functionality. - -## System Information - -Please copy and paste the output of the `gpte --sysinfo` command as part of your bug report. - -## Installation Method - -Please specify whether you installed GPT-Engineer using `pip install` or by building the repository. 
diff --git a/.github/ISSUE_TEMPLATE/documentation-clarification.md b/.github/ISSUE_TEMPLATE/documentation-clarification.md deleted file mode 100644 index 66de382e5d..0000000000 --- a/.github/ISSUE_TEMPLATE/documentation-clarification.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -name: Documentation improvement -about: Inaccuracies, inadequacies in the docs pages -title: '' -labels: documentation, triage -assignees: '' - ---- - -## Policy and info - - Maintainers will close issues that have been stale for 14 days if they contain relevant answers. - - Adding the label "sweep" will automatically turn the issue into a coded pull request. Works best for mechanical tasks. More info/syntax at: https://docs.sweep.dev/ - - -## Description -A clear and concise description of how the documentation at https://gpt-engineer.readthedocs.io/en/latest/ is providing wrong/insufficient information. - -## Suggestion -How can it be improved diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md deleted file mode 100644 index 31e6ea2776..0000000000 --- a/.github/ISSUE_TEMPLATE/feature-request.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -name: Feature request -about: Suggest an idea for this project -title: '' -labels: enhancement, triage -assignees: '' - ---- - -## Policy and info - - Maintainers will close issues that have been stale for 14 days if they contain relevant answers. - - Adding the label "sweep" will automatically turn the issue into a coded pull request. Works best for mechanical tasks. More info/syntax at: https://docs.sweep.dev/ - - Consider adding the label "good first issue" for interesting, but easy features. - -## Feature description -A clear and concise description of what you would like to have - -## Motivation/Application -Why is this feature useful? 
diff --git a/.github/PULL_REQUEST_TEMPLATE/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 8343ddb0d4..0000000000 --- a/.github/PULL_REQUEST_TEMPLATE/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,9 +0,0 @@ -**YOU MAY DELETE THE ENTIRE TEMPLATE BELOW.** - -## How Has This Been Tested? - -Please describe if you have either: - -- Generated the "example" project -- Ran the entire benchmark suite -- Something else diff --git a/.github/workflows/automation.yml b/.github/workflows/automation.yml deleted file mode 100644 index 1f232730d0..0000000000 --- a/.github/workflows/automation.yml +++ /dev/null @@ -1,29 +0,0 @@ -name: Automation Workflow - -on: - schedule: - - cron: '0 0 * * *' - issues: - types: [opened, edited, reopened] - pull_request: - types: [opened, edited, reopened] - -jobs: - mark-stale-issues: - runs-on: ubuntu-latest - steps: - - name: Mark stale issues - uses: actions/stale@v4 - with: - stale-issue-message: 'This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs.' 
- days-before-stale: 60 - - # Add additional jobs as needed - # job-name: - # runs-on: ubuntu-latest - # steps: - # - name: Job step name - # uses: action-name@version - # with: - # parameter1: value1 - # diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml deleted file mode 100644 index 94fae70786..0000000000 --- a/.github/workflows/ci.yaml +++ /dev/null @@ -1,49 +0,0 @@ -name: Tox pytest all python versions - -on: - push: - branches: [ main ] - paths: - - gpt_engineer/** - - tests/** - pull_request: - branches: [ main ] - -concurrency: - group: ${{ github.workflow }} - ${{ github.ref }} - cancel-in-progress: true - -jobs: - test: - runs-on: ubuntu-latest - strategy: - matrix: - python-version: ['3.10', '3.11', '3.12'] - - steps: - - name: Checkout repository - uses: actions/checkout@v3 - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version == '3.12' && '3.12.3' || matrix.python-version }} # Using 3.12.3 to resolve Pydantic ForwardRef issue - cache: 'pip' # Note that pip is for the tox level. 
Poetry is still used for installing the specific environments (tox.ini) - - - name: Check Python Version - run: python --version - - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install tox==4.15.0 poetry - - - name: Run tox - env: - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - run: tox - - # Temporarily disabling codecov until we resolve codecov rate limiting issue - # - name: Report coverage - # run: | - # bash <(curl -s https://codecov.io/bash) diff --git a/.github/workflows/pre-commit.yaml b/.github/workflows/pre-commit.yaml deleted file mode 100644 index 1ef45cc277..0000000000 --- a/.github/workflows/pre-commit.yaml +++ /dev/null @@ -1,22 +0,0 @@ -name: pre-commit - -on: - pull_request: - push: - branches: [main] - -jobs: - pre-commit: - runs-on: ubuntu-latest - - permissions: - contents: write - - steps: - - uses: actions/checkout@v3 - - - uses: actions/setup-python@v4 - - - uses: pre-commit/action@v3.0.0 - with: - extra_args: --all-files diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml deleted file mode 100644 index 5044c1f4da..0000000000 --- a/.github/workflows/release.yaml +++ /dev/null @@ -1,73 +0,0 @@ -name: Build and publish Python packages to PyPI - -on: - workflow_dispatch: - release: - types: - - published - -jobs: - build: - runs-on: ubuntu-latest - strategy: - matrix: - python-version: - - "3.10" - steps: - - uses: actions/checkout@v3 - - - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - # Removed the cache line that was here - - # Install Poetry - - name: Install Poetry - run: | - curl -sSL https://install.python-poetry.org | python3 - - - # Add Poetry to PATH - - name: Add Poetry to PATH - run: echo "$HOME/.local/bin" >> $GITHUB_PATH - - # Cache Poetry's dependencies based on the lock file - - name: Set up Poetry cache - uses: actions/cache@v3 - with: - path: ~/.cache/pypoetry - key: ${{ runner.os }}-poetry-${{ hashFiles('**/poetry.lock') }} 
- restore-keys: | - ${{ runner.os }}-poetry- - - # Install dependencies using Poetry (if any) - - name: Install dependencies - run: poetry install - - # Build package using Poetry - - name: Build package - run: poetry build --format sdist - - # Upload package as build artifact - - uses: actions/upload-artifact@v3 - with: - name: package - path: dist/ - - publish: - runs-on: ubuntu-latest - needs: build - environment: - name: pypi - url: https://pypi.org/p/gpt-engineer - permissions: - id-token: write - steps: - - uses: actions/download-artifact@v3 - with: - name: package - path: dist/ - - - name: Publish packages to PyPI - uses: pypa/gh-action-pypi-publish@release/v1 - with: - password: ${{ secrets.PYPI_API_TOKEN }} diff --git a/.gitignore b/.gitignore index c0c793b88e..f2b567e0aa 100644 --- a/.gitignore +++ b/.gitignore @@ -1,98 +1,12 @@ -# See https://help.github.com/ignore-files/ for more about ignoring files. - -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class -.history/ - -# Distribution / packaging -dist/ -build/ -*.egg-info/ -*.egg - -# Virtual environments -.env -.env.sh -venv/ -ENV/ -venv_test_installation/ - -# IDE-specific files -.vscode/ -.idea/ - -# Compiled Python modules -*.pyc -*.pyo -*.pyd - -# Python testing -.pytest_cache/ -.ruff_cache/ -.mypy_cache/ -.coverage -coverage.* - -# macOS specific files .DS_Store - -# Windows specific files -Thumbs.db - -# this application's specific files -archive - -# any log file -*log.txt -todo -scratchpad - -# Pyenv -.python-version - -.gpte_consent - -# projects folder apart from default prompt - -projects/* -!projects/example/prompt -!projects/example-improve -!projects/example-vision - -# docs - -docs/_build -docs/applications -docs/benchmark -docs/cli -docs/core -docs/intro -docs/tools - -# coding assistants -.aider* -.gpteng - -# webapp specific -webapp/node_modules -webapp/package-lock.json - -webapp/.next/ - -.langchain.db - -# TODO files -/!todo* - -#ignore tox files -.tox - -# 
locally saved datasets -gpt_engineer/benchmark/benchmarks/apps/dataset -gpt_engineer/benchmark/benchmarks/mbpp/dataset - -gpt_engineer/benchmark/minimal_bench_config.toml - -test.json +node_modules +.turbo +*.log +.next +*.local +.env +.cache +.turbo +.vercel +.vscode +.idea \ No newline at end of file diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml deleted file mode 100644 index 3fe0daa264..0000000000 --- a/.pre-commit-config.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# See https://pre-commit.com for more information -# See https://pre-commit.com/hooks.html for more hooks -fail_fast: true -default_stages: [commit] - -repos: - - repo: https://github.com/psf/black - rev: 23.3.0 - hooks: - - id: black - args: [--config, pyproject.toml] - types: [python] - - - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: "v0.0.272" - hooks: - - id: ruff - args: [--fix, --exit-non-zero-on-fix] - - - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 - hooks: - - id: check-toml - - id: check-yaml - - id: detect-private-key - - id: end-of-file-fixer - - id: trailing-whitespace diff --git a/.readthedocs.yaml b/.readthedocs.yaml deleted file mode 100644 index 995eb517ec..0000000000 --- a/.readthedocs.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# .readthedocs.yaml -# Read the Docs configuration file -# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details - -# Required -version: 2 - -# Set the OS, Python version and other tools you might need -build: - os: ubuntu-22.04 - tools: - python: "3.11" - # You can also specify other tool versions: - # nodejs: "19" - # rust: "1.64" - # golang: "1.19" - jobs: - post_create_environment: - - pip install poetry - post_install: - - VIRTUAL_ENV=$READTHEDOCS_VIRTUALENV_PATH poetry install --with docs - pre_build: - - python docs/create_api_rst.py - -# Build documentation in the "docs/" directory with Sphinx -sphinx: - configuration: docs/conf.py - -# Optionally build your docs in additional formats 
such as PDF and ePub -# formats: -# - pdf -# - epub - -# Optional but recommended, declare the Python requirements required -# to build your documentation -# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html -#python: -# install: -# - requirements: docs/requirements.txt diff --git a/Acknowledgements.md b/Acknowledgements.md deleted file mode 100644 index 4f1a2cb730..0000000000 --- a/Acknowledgements.md +++ /dev/null @@ -1,5 +0,0 @@ -# We thank the following people for inspiration - -| Person | Content | File(s) | Source | -|----|---|---|---| -| Paul Gauthier | The prompt for the `improve code` step is strongly based on Paul's prompt in Aider | /preprompts/improve.txt | https://github.com/paul-gauthier/aider/blob/main/aider/coders/editblock_coder.py diff --git a/DISCLAIMER.md b/DISCLAIMER.md deleted file mode 100644 index 17fd588fb6..0000000000 --- a/DISCLAIMER.md +++ /dev/null @@ -1,11 +0,0 @@ -# Disclaimer - -gpt-engineer is an experimental application and is provided "as-is" without any warranty, express or implied. By using this software, you agree to assume all risks associated with its use, including but not limited to data loss, system failure, or any other issues that may arise. - -The developers and contributors of this project do not accept any responsibility or liability for any losses, damages, or other consequences that may occur as a result of using this software. You are solely responsible for any decisions and actions taken based on the information provided by gpt-engineer. - -Please note that the use of the GPT-4 language model can be expensive due to its token usage. By utilizing this project, you acknowledge that you are responsible for monitoring and managing your own token usage and the associated costs. It is highly recommended to check your OpenAI API usage regularly and set up any necessary limits or alerts to prevent unexpected charges. 
- -As an autonomous experiment, gpt-engineer may generate code or take actions that are not in line with real-world business practices or legal requirements. It is your responsibility to ensure that any actions or decisions made by the generated code comply with all applicable laws, regulations, and ethical standards. The developers and contributors of this project shall not be held responsible for any consequences arising from the use of this software. - -By using gpt-engineer, you agree to indemnify, defend, and hold harmless the developers, contributors, and any affiliated parties from and against any and all claims, damages, losses, liabilities, costs, and expenses (including reasonable attorneys' fees) arising from your use of this software or your violation of these terms. diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000..932439e8c7 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,20 @@ +FROM node + +EXPOSE 3000/tcp + +RUN apt-get update && \ +apt-get upgrade -y && \ +rm -rf -- /var/lib/apt && \ +npm install -g bun + +USER node + +WORKDIR /home/node + +RUN git clone https://github.com/developersdigest/llm-answer-engine.git app && \ +cd app && \ +bun install + +WORKDIR /home/node/app + +ENTRYPOINT ["/usr/local/bin/bun", "run", "dev"] diff --git a/GOVERNANCE.md b/GOVERNANCE.md deleted file mode 100644 index f621698a95..0000000000 --- a/GOVERNANCE.md +++ /dev/null @@ -1,76 +0,0 @@ -# Governance Model of GPT-Engineer - -## I. Project Board Structure - -### Project Board - -The Project Board is the central decision-making body for the project, overseeing both strategic and technical decisions of the open source project GPT-Engineer. - -#### Composition: -- The Board consists of the project's founder, Anton Osika, and representatives from each significant contributing entity, including individual contributors and commercial partners. -- The board is restricted to a maximum of 7 seats. -- New board members are admitted by majority vote. 
-- Board members may be expelled by majority vote. - -## II. Roles and Responsibilities - -### Veto due to Ethical Considerations -- The founder has veto right over any decisions made by the Board. -- This veto power is a safeguard to ensure the project's direction remains true to its original vision and ethos. - -### Contribution-Conditioned Decision Making -- Each board member has one vote as long as they qualify as active contributors. -- To qualify as an active contributor, a board member or the entity they represent, must have made 6 significant contributions on the GPT-Engineer GitHub page over the past 90 days. -- A significant contribution is: - - A merged pull request with at least 3 lines of code. - - Engagement in a GitHub/Discord bug report, where the board members' input leads to the confirmed resolution of the bug. If the solution is in terms of a merged pull request, the bug resolution together with the merged pull request counts as one significant contribution. - - A non-code, but necessary, community activity agreed on by the board, such as administration, corporate design, workflow design etc, deemed to take more than 1 hour. Participation in meetings or discussions does not count as a significant contribution. -- A board member may retain its seat on the board without voting right. - -## III. Decision-Making Process - -### Majority Voting -- Decisions are made based on a simple majority vote. Majority means more than half of board members with voting rights agree on one decision, regardless of the number of choices. -- The founder's veto can override the majority decision if exercised. - -### Regular Meetings and Reporting -- The Board will convene regularly, with the frequency of meetings decided by the Board members. -- Decisions, discussion points, and contributions will be transparently documented and shared within the project community. - -## IV. 
Data Access and Confidentiality - -### Board Members' Right to Access Data -- Any confidential data collected by GPT-Engineer is accessible to the board members after signing a relevant non-disclosure agreement (NDA). -- A relevant NDA requires a board member to erase any copies of confidential data obtained by the time of leaving the board. - -## V. Scope of Voting - -### Essential Topics -- Board voting is restricted to essential topics. -- Essential topics include essential technical topics and essential community topics. -- An essential technical topic is a change in the GPT-engineer code base that is likely to introduce breaking changes, or significantly change the user experience for users and developers. -- Essential community topics are changes to the community's governance or other central policy documents such as the readme or license. -- Day-to-day tasks such as bug fixes or implementation of new features outside the core module do not require voting. - -## VI. Transparency - -### Commitment to Transparency -- The governance process will be transparent, with key decisions, meeting minutes, and voting results publicly available, except for sensitive or confidential matters. - -## VII. Amendments - -### Changes to Governance Structure -- The governance model can be revised as the project evolves. Proposals for changes can be made by any Board member and will require a majority vote for adoption. - -## VIII. The GPT-Engineer Brand - -### Copyright and Stewardship -- The creator of GPT-engineer (Anton Osika) will be the steward of the GPT-engineer brand to decide when and how it can be used, and is committed to never jeopardizing the interest of the open source community in this stewardship. -- Anton Osika possesses the exclusive intellectual property rights for the trademark 'GPT-engineer,' encompassing all case variations such as 'gpt-engineer,' 'GPT-engineer,' and 'GPTE.' 
This ownership extends to the exclusive legal authority to utilize the 'GPT-engineer' trademark in the establishment and branding of both commercial and non-profit entities. It includes, but is not limited to, the use of the trademark in business names, logos, marketing materials, and other forms of corporate identity. Any use of the 'GPT-engineer' trademark, in any of its case variations, by other parties for commercial or non-commercial purposes requires express permission or a license agreement from Anton Osika. - -# Current Board Members -- Anton Osika -- Axel Theorell -- Corey Gallon -- Peter Harrington -- Theo McCabe diff --git a/LICENSE b/LICENSE index a56867b9bd..94f45a3a72 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2023 Anton Osika +Copyright (c) 2024 Developers Digest Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index d575b3d243..0000000000 --- a/MANIFEST.in +++ /dev/null @@ -1 +0,0 @@ -recursive-include gpt_engineer/preprompts * diff --git a/Makefile b/Makefile deleted file mode 100644 index c66a67e63a..0000000000 --- a/Makefile +++ /dev/null @@ -1,52 +0,0 @@ -#Sets the default shell for executing commands as /bin/bash and specifies command should be executed in a Bash shell. -SHELL := /bin/bash - -# Color codes for terminal output -COLOR_RESET=\033[0m -COLOR_CYAN=\033[1;36m -COLOR_GREEN=\033[1;32m - -# Defines the targets help, install, dev-install, and run as phony targets. -.PHONY: help install run - -#sets the default goal to help when no target is specified on the command line. -.DEFAULT_GOAL := help - -#Disables echoing of commands. -.SILENT: - -#Sets the variable name to the second word from the MAKECMDGOALS. -name := $(word 2,$(MAKECMDGOALS)) - -#Defines a target named help. 
-help: - @echo "Please use 'make ' where is one of the following:" - @echo " help Return this message with usage instructions." - @echo " install Will install the dependencies using Poetry." - @echo " run Runs GPT Engineer on the folder with the given name." - -#Defines a target named install. This target will install the project using Poetry. -install: poetry-install install-pre-commit farewell - -#Defines a target named poetry-install. This target will install the project dependencies using Poetry. -poetry-install: - @echo -e "$(COLOR_CYAN)Installing project with Poetry...$(COLOR_RESET)" && \ - poetry install - -#Defines a target named install-pre-commit. This target will install the pre-commit hooks. -install-pre-commit: - @echo -e "$(COLOR_CYAN)Installing pre-commit hooks...$(COLOR_RESET)" && \ - poetry run pre-commit install - -#Defines a target named farewell. This target will print a farewell message. -farewell: - @echo -e "$(COLOR_GREEN)All done!$(COLOR_RESET)" - -#Defines a target named run. This target will run GPT Engineer on the folder with the given name. -run: - @echo -e "$(COLOR_CYAN)Running GPT Engineer on $(COLOR_GREEN)$(name)$(COLOR_CYAN) folder...$(COLOR_RESET)" && \ - poetry run gpt-engineer projects/$(name) - -# Counts the lines of code in the project -cloc: - cloc . --exclude-dir=node_modules,dist,build,.mypy_cache,benchmark --exclude-list-file=.gitignore --fullpath --not-match-d='docs/_build' --by-file diff --git a/README.md b/README.md index 172210af7f..7a88c2cc72 100644 --- a/README.md +++ b/README.md @@ -1,129 +1,223 @@ -# gpt-engineer +

Perplexity-Inspired LLM Answer Engine

+
+ +
+ developersdigest%2Fllm-answer-engine | Trendshift +
+
+
+
+
+ +This repository contains the code and instructions needed to build a sophisticated answer engine that leverages the capabilities of [Groq](https://www.groq.com/), [Mistral AI's Mixtral](https://mistral.ai/news/mixtral-of-experts/), [Langchain.JS](https://js.langchain.com/docs/), [Brave Search](https://search.brave.com/), [Serper API](https://serper.dev/), and [OpenAI](https://openai.com/). Designed to efficiently return sources, answers, images, videos, and follow-up questions based on user queries, this project is an ideal starting point for developers interested in natural language processing and search technologies. + +## YouTube Tutorials + + + + +## Technologies Used + +- **Next.js**: A React framework for building server-side rendered and static web applications. +- **Tailwind CSS**: A utility-first CSS framework for rapidly building custom user interfaces. +- **Vercel AI SDK**: The Vercel AI SDK is a library for building AI-powered streaming text and chat UIs. +- **Groq & Mixtral**: Technologies for processing and understanding user queries. +- **Langchain.JS**: A JavaScript library focused on text operations, such as text splitting and embeddings. +- **Brave Search**: A privacy-focused search engine used for sourcing relevant content and images. +- **Serper API**: Used for fetching relevant video and image results based on the user's query. +- **OpenAI Embeddings**: Used for creating vector representations of text chunks. +- **Cheerio**: Utilized for HTML parsing, allowing the extraction of content from web pages. +- **Ollama (Optional)**: Used for streaming inference and embeddings. +- **Upstash Redis Rate Limiting (Optional)**: Used for setting up rate limiting for the application. +- **Upstash Semantic Cache (Optional)**: Used for caching data for faster response times. 
-[![GitHub Repo stars](https://img.shields.io/github/stars/gpt-engineer-org/gpt-engineer?style=social)](https://github.com/gpt-engineer-org/gpt-engineer) -[![Discord Follow](https://dcbadge.vercel.app/api/server/8tcDQ89Ej2?style=flat)](https://discord.gg/8tcDQ89Ej2) -[![License](https://img.shields.io/github/license/gpt-engineer-org/gpt-engineer)](https://github.com/gpt-engineer-org/gpt-engineer/blob/main/LICENSE) -[![GitHub Issues or Pull Requests](https://img.shields.io/github/issues/gpt-engineer-org/gpt-engineer)](https://github.com/gpt-engineer-org/gpt-engineer/issues) -![GitHub Release](https://img.shields.io/github/v/release/gpt-engineer-org/gpt-engineer) -[![Twitter Follow](https://img.shields.io/twitter/follow/antonosika?style=social)](https://twitter.com/antonosika) +## Getting Started -The OG code genereation experimentation platform! +### Prerequisites -If you are looking for the evolution that is an opinionated, managed service – check out gptengineer.app. +- Obtain API keys from OpenAI, Groq, Brave Search, and Serper. -If you are looking for a well maintained hackable CLI for – check out aider. +#### Prerequisites for Non-Docker Installation +- Ensure Node.js and npm are installed on your machine. -gpt-engineer lets you: -- Specify software in natural language -- Sit back and watch as an AI writes and executes the code -- Ask the AI to implement improvements +#### Prerequisites for Docker Installation -## Getting Started +- Ensure Docker and docker compose are installed on your machine. + +### Obtaining API Keys + +- **OpenAI API Key**: [Generate your OpenAI API key here](https://platform.openai.com/account/api-keys). +- **Groq API Key**: [Get your Groq API key here](https://console.groq.com/keys). +- **Brave Search API Key**: [Obtain your Brave Search API key here](https://brave.com/search/api/). +- **Serper API Key**: [Get your Serper API key here](https://serper.dev/). 
+ +### Quick Clone and Deploy + +Simple, Easy, Fast and Free - deploy to vercel + +> Make Sure to fill all the API Keys required for the Installation. -### Install gpt-engineer +[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fdevelopersdigest%2Fllm-answer-engine&env=OPENAI_API_KEY,GROQ_API_KEY,BRAVE_SEARCH_API_KEY,SERPER_API&envDescription=ALL%20API%20KEYS%20are%20needed%20for%20this%20application.%20If%20you%20are%20not%20using%20OpenAI%20KEY%20and%20Using%20Groq%20Instead%2C%20then%20just%20enter%20a%20random%20string%20in%20the%20OpenAI%20Key%20section%20so%20it%20wont%20generate%20any%20error%20while%20building%20the%20project.&project-name=llm-answer-engine&repository-name=llm-answer-engine&skippable-integrations=1) -For **stable** release: -- `python -m pip install gpt-engineer` -For **development**: -- `git clone https://github.com/gpt-engineer-org/gpt-engineer.git` -- `cd gpt-engineer` -- `poetry install` -- `poetry shell` to activate the virtual environment +### Installation -We actively support Python 3.10 - 3.12. The last version to support Python 3.8 - 3.9 was [0.2.6](https://pypi.org/project/gpt-engineer/0.2.6/). +1. Clone the repository: + ``` + git clone https://github.com/developersdigest/llm-answer-engine.git + ``` +2. Move in the directory + ``` + cd llm-answer-engine + ``` -### Setup API key +#### Docker Installation -Choose **one** of: -- Export env variable (you can add this to .bashrc so that you don't have to do it each time you start the terminal) - - `export OPENAI_API_KEY=[your api key]` -- .env file: - - Create a copy of `.env.template` named `.env` - - Add your OPENAI_API_KEY in .env -- Custom model: - - See [docs](https://gpt-engineer.readthedocs.io/en/latest/open_models.html), supports local model, azure, etc. +3. Edit the `docker-compose.yml` file and add your API keys -Check the [Windows README](./WINDOWS_README.md) for Windows usage. +4. 
Running the Server -**Other ways to run:** -- Use Docker ([instructions](docker/README.md)) -- Do everything in your browser: -[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://github.com/gpt-engineer-org/gpt-engineer/codespaces) +To start the server, execute: +``` +docker compose up -d #for v2 +``` +or +``` +docker-compose up -d #for v1 +``` +the server will be listening on the specified port. -### Create new code (default usage) -- Create an empty folder for your project anywhere on your computer -- Create a file called `prompt` (no extension) inside your new folder and fill it with instructions -- Run `gpte ` with a relative path to your folder - - For example: `gpte projects/my-new-project` from the gpt-engineer directory root with your new folder in `projects/` +#### Non-Docker Installation -### Improve existing code -- Locate a folder with code which you want to improve anywhere on your computer -- Create a file called `prompt` (no extension) inside your new folder and fill it with instructions for how you want to improve the code -- Run `gpte -i` with a relative path to your folder - - For example: `gpte projects/my-old-project -i` from the gpt-engineer directory root with your folder in `projects/` +3. Install the required dependencies: + ``` + npm install + ``` + or + ``` + bun install + ``` +4. Create a `.env` file in the root of your project and add your API keys: + ``` + OPENAI_API_KEY=your_openai_api_key + GROQ_API_KEY=your_groq_api_key + BRAVE_SEARCH_API_KEY=your_brave_search_api_key + SERPER_API=your_serper_api_key + ``` +5. Running the Server -### Benchmark custom agents -- gpt-engineer installs the binary 'bench', which gives you a simple interface for benchmarking your own agent implementations against popular public datasets. 
-- The easiest way to get started with benchmarking is by checking out the [template](https://github.com/gpt-engineer-org/gpte-bench-template) repo, which contains detailed instructions and an agent template. -- Currently supported benchmark: - - [APPS](https://github.com/hendrycks/apps) - - [MBPP](https://github.com/google-research/google-research/tree/master/mbpp) +To start the server, execute: +``` +npm run dev +``` +or +``` +bun run dev +``` -The community has started work with different benchmarking initiatives, as described in [this Loom](https://www.loom.com/share/206805143fbb4302b5455a5329eaab17?sid=f689608f-8e49-44f7-b55f-4c81e9dc93e6) video. +the server will be listening on the specified port. -### Research -Some of our community members have worked on different research briefs that could be taken further. See [this document](https://docs.google.com/document/d/1qmOj2DvdPc6syIAm8iISZFpfik26BYw7ZziD5c-9G0E/edit?usp=sharing) if you are interested. +## Editing the Configuration -## Terms -By running gpt-engineer, you agree to our [terms](https://github.com/gpt-engineer-org/gpt-engineer/blob/main/TERMS_OF_USE.md). +The configuration file is located in the `app/config.tsx` file. You can modify the following values +- useOllamaInference: false, +- useOllamaEmbeddings: false, +- inferenceModel: 'mixtral-8x7b-32768', +- inferenceAPIKey: process.env.GROQ_API_KEY, +- embeddingsModel: 'text-embedding-3-small', +- textChunkSize: 800, +- textChunkOverlap: 200, +- numberOfSimilarityResults: 2, +- numberOfPagesToScan: 10, +- nonOllamaBaseURL: 'https://api.groq.com/openai/v1' +- useFunctionCalling: true +- useRateLimiting: false +- useSemanticCache: false +- usePortkey: false -## Relation to gptengineer.app (GPT Engineer) -[gptengineer.app](https://gptengineer.app/) is a commercial project for the automatic generation of web apps. -It features a UI for non-technical users connected to a git-controlled codebase. 
-The gptengineer.app team is actively supporting the open source community. +### Function Calling Support (Beta) +Currently, function calling is supported with the following capabilities: +- Maps and Locations (Serper Locations API) +- Shopping (Serper Shopping API) +- TradingView Stock Data (Free Widget) +- Spotify (Free API) +- Any functionality that you would like to see here, please open an issue or submit a PR. +- To enable function calling and conditional streaming UI (currently in beta), ensure useFunctionCalling is set to true in the config file. -## Features +### Ollama Support (Partially supported) +Currently, streaming text responses are supported for Ollama, but follow-up questions are not yet supported. -### Pre Prompts -You can specify the "identity" of the AI agent by overriding the `preprompts` folder with your own version of the `preprompts`. You can do so via the `--use-custom-preprompts` argument. +Embeddings are supported, however, time-to-first-token can be quite long when using both a local embedding model as well as a local model for the streaming inference. I recommend decreasing a number of the RAG values specified in the `app/config.tsx` file to decrease the time-to-first-token when using Ollama. -Editing the `preprompts` is how you make the agent remember things between projects. +To get started, make sure you have the Ollama model running on your local machine and set within the config the model you would like to use and set useOllamaInference and/or useOllamaEmbeddings to true. -### Vision +Note: When 'useOllamaInference' is set to true, the model will be used for text generation, but it will skip the follow-up questions inference step when using Ollama. -By default, gpt-engineer expects text input via a `prompt` file. It can also accept image inputs for vision-capable models. This can be useful for adding UX or architecture diagrams as additional context for GPT Engineer.
You can do this by specifying an image directory with the `—-image_directory` flag and setting a vision-capable model in the second CLI argument. +More info: https://ollama.com/blog/openai-compatibility -E.g. `gpte projects/example-vision gpt-4-vision-preview --prompt_file prompt/text --image_directory prompt/images -i` +### Roadmap -### Open source, local and alternative models +- [] Add document upload + RAG for document search/retrieval +- [] Add a settings component to allow users to select the model, embeddings model, and other parameters from the UI +- [] Add support for follow-up questions when using Ollama +- [Complete] Add support diffusion models (Fal.AI SD3 to start), accessible via '@ mention' +- [Complete] Add AI Gateway to support multiple models and embeddings. (OpenAI, Azure OpenAI, Anyscale, Google Gemini & Palm, Anthropic, Cohere, Together AI, Perplexity, Mistral, Nomic, AI21, Stability AI, DeepInfra, Ollama, etc) +```https://github.com/Portkey-AI/gateway``` +- [Complete] Add support for semantic caching to improve response times +- [Complete] Add support for dynamic and conditionally rendered UI components based on the user's query -By default, gpt-engineer supports OpenAI Models via the OpenAI API or Azure OpenAI API, as well as Anthropic models. +![Example](https://media.giphy.com/media/v1.Y2lkPTc5MGI3NjExN284d3p5azAyNHpubm9mb2F0cnB6MWdtcTdnd2Nkb2d1ZnRtMG0yYiZlcD12MV9pbnRlcm5hbF9naWZfYnlfaWQmY3Q9Zw/OMpt8ZbBsjphZz6mue/giphy.gif) -With a little extra setup, you can also run with open source models like WizardCoder. See the [documentation](https://gpt-engineer.readthedocs.io/en/latest/open_models.html) for example instructions. +- [Completed] Add dark mode support based on the user's system preference -## Mission +### Backend + Node Only Express API -The gpt-engineer community mission is to **maintain tools that coding agent builders can use and facilitate collaboration in the open source community**. 
+[Watch the express tutorial here](https://youtu.be/43ZCeBTcsS8) for a detailed guide on setting up and running this project. +In addition to the Next.JS version of the project, there is a backend only version that uses Node.js and Express. Which is located in the 'express-api' directory. This is a standalone version of the project that can be used as a reference for building a similar API. There is also a readme file in the 'express-api' directory that explains how to run the backend version. -If you are interested in contributing to this, we are interested in having you. +### Upstash Redis Rate Limiting +[Watch the Upstash Redis Rate Limiting tutorial here](https://youtu.be/3_aNVu6EU3Y) for a detailed guide on setting up and running this project. +Upstash Redis Rate Limiting is a free tier service that allows you to set up rate limiting for your application. It provides a simple and easy-to-use interface for configuring and managing rate limits. With Upstash, you can easily set limits on the number of requests per user, IP address, or other criteria. This can help prevent abuse and ensure that your application is not overwhelmed with requests. -If you want to see our broader ambitions, check out the [roadmap](https://github.com/gpt-engineer-org/gpt-engineer/blob/main/ROADMAP.md), and join -[discord](https://discord.gg/8tcDQ89Ej2) -to learn how you can [contribute](.github/CONTRIBUTING.md) to it. +## Contributing -gpt-engineer is [governed](https://github.com/gpt-engineer-org/gpt-engineer/blob/main/GOVERNANCE.md) by a board of long-term contributors. If you contribute routinely and have an interest in shaping the future of gpt-engineer, you will be considered for the board. +Contributions to the project are welcome. Feel free to fork the repository, make your changes, and submit a pull request. You can also open issues to suggest improvements or report bugs. -## Significant contributors - +## License -## Example +This project is licensed under the MIT License. 
+[![Star History Chart](https://api.star-history.com/svg?repos=developersdigest/llm-answer-engine&type=Date)](https://star-history.com/#developersdigest/llm-answer-engine&Date) +I'm the developer behind Developers Digest. If you find my work helpful or enjoy what I do, consider supporting me. Here are a few ways you can do that: -https://github.com/gpt-engineer-org/gpt-engineer/assets/4467025/40d0a9a8-82d0-4432-9376-136df0d57c99 +- **Patreon**: Support me on Patreon at [patreon.com/DevelopersDigest](https://www.patreon.com/DevelopersDigest) +- **Buy Me A Coffee**: You can buy me a coffee at [buymeacoffee.com/developersdigest](https://www.buymeacoffee.com/developersdigest) +- **Website**: Check out my website at [developersdigest.tech](https://developersdigest.tech) +- **Github**: Follow me on GitHub at [github.com/developersdigest](https://github.com/developersdigest) +- **Twitter**: Follow me on Twitter at [twitter.com/dev__digest](https://twitter.com/dev__digest) diff --git a/ROADMAP.md b/ROADMAP.md deleted file mode 100644 index f3759edbf3..0000000000 --- a/ROADMAP.md +++ /dev/null @@ -1,31 +0,0 @@ -# Roadmap - -image - - -This document is a general roadmap guide to the gpt-engineer project's strategic direction. -Our goal is to continually improve by focusing on three main pillars: -- User Experience, -- Technical Features, and -- Performance Tracking/Testing. - -Each pillar is supported by a set of epics, reflecting our major goals and initiatives. - - -## Tracking Progress with GitHub Projects - -We are using [GitHub Projects](https://github.com/orgs/gpt-engineer-org/projects/3) to track the progress of our roadmap. - -Each issue within our project is categorized under one of the main pillars and, in most cases, associated epics. You can check our [Project's README](https://github.com/orgs/gpt-engineer-org/projects/3?pane=info) section to understand better our logic and organization. 
- - - -# How you can help out - -You can: - -- Post a "design" as a Google Doc in our [Discord](https://discord.com/channels/1119885301872070706/1120698764445880350), and ask for feedback to address one of the items in the roadmap -- Submit PRs to address one of the items in the roadmap -- Do a review of someone else's PR and propose next steps (further review, merge, close) - -🙌 Volunteer work in any of these will get acknowledged.🙌 diff --git a/TERMS_OF_USE.md b/TERMS_OF_USE.md deleted file mode 100644 index 91e97b86a2..0000000000 --- a/TERMS_OF_USE.md +++ /dev/null @@ -1,13 +0,0 @@ -# Terms of Use - -Welcome to gpt-engineer! By utilizing this powerful tool, you acknowledge and agree to the following comprehensive Terms of Use. We also encourage you to review the linked [disclaimer of warranty](https://github.com/gpt-engineer-org/gpt-engineer/blob/main/DISCLAIMER.md) for additional information. - -Both OpenAI, L.L.C. and the dedicated creators behind the remarkable gpt-engineer have implemented a data collection process focused on enhancing the product's capabilities. This endeavor is undertaken with utmost care and dedication to safeguarding user privacy. Rest assured that no information that could be directly attributed to any individual is stored. - -It's important to be aware that the utilization of natural text inputs, including the 'prompt' and 'feedback' files, may be subject to storage. While it's theoretically possible to establish connections between a person's writing style or content within these files and their real-life identity, please note that the creators of gpt-engineer explicitly assure that such attempts will never be made. - -For a deeper understanding of OpenAI's overarching terms of use, we encourage you to explore the details available [here](https://openai.com/policies/terms-of-use). - -Optionally, gpt-engineer collects usage data for the purpose of improving gpt-engineer. 
Data collection only happens when a consent file called .gpte_consent is present in the gpt-engineer directory. Note that gpt-engineer cannot prevent that data streams passing through gpt-engineer to a third party may be stored by that third party (for example OpenAI). - -Your engagement with gpt-engineer is an acknowledgment and acceptance of these terms, demonstrating your commitment to using this tool responsibly and within the bounds of ethical conduct. We appreciate your trust and look forward to the exciting possibilities that gpt-engineer can offer in your endeavors. diff --git a/WINDOWS_README.md b/WINDOWS_README.md deleted file mode 100644 index 1553d542dd..0000000000 --- a/WINDOWS_README.md +++ /dev/null @@ -1,68 +0,0 @@ -# Windows Setup -## Short version - -On Windows, follow the standard [README.md](https://github.com/gpt-engineer-org/gpt-engineer/blob/main/README.md), but to set API key do one of: -- `set OPENAI_API_KEY=[your api key]` on cmd -- `$env:OPENAI_API_KEY="[your api key]"` on powershell - -## Full setup guide - -Choose either **stable** or **development**. - -For **stable** release: - -Run `pip install gpt-engineer` in the command line as an administrator - -Or: - - 1. Open your web browser and navigate to the Python Package Index (PyPI) website: . - 2. On the PyPI page for the gpt-engineer package, locate the "Download files" section. Here you'll find a list of available versions and their corresponding download links. - 3. Identify the version of gpt-engineer you want to install and click on the associated download link. This will download the package file (usually a .tar.gz or .whl file) to your computer. - 4. Once the package file is downloaded, open your Python development environment or IDE. - 5. In your Python development environment, look for an option to install packages or manage dependencies. The exact location and terminology may vary depending on your IDE. 
For example, in PyCharm, you can go to "File" > "Settings" > "Project: \" > "Python Interpreter" to manage packages. - 6. In the package management interface, you should see a list of installed packages. Look for an option to add or install a new package. - 7. Click on the "Add Package" or "Install Package" button. - 8. In the package installation dialog, choose the option to install from a file or from a local source. - 9. Browse and select the downloaded gpt-engineer package file from your computer. - -For **development**: - -- `git clone git@github.com:gpt-engineer-org/gpt-engineer.git` -- `cd gpt-engineer` -- `poetry install` -- `poetry shell` to activate the virtual environment - -### Setup - -With an api key from OpenAI: - -Run `set OPENAI_API_KEY=[your API key]` in the command line - -Or: - - 1. In the Start Menu, type to search for "Environment Variables" and click on "Edit the system environment variables". - 2. In the System Properties window, click on the "Environment Variables" button. - 3. In the Environment Variables window, you'll see two sections: User variables and System variables. - 4. To set a user-specific environment variable, select the "New" button under the User variables section. - 5. To set a system-wide environment variable, select the "New" button under the System variables section. - 6. Enter the variable name "OPENAI_API_KEY" in the "Variable name" field. - 7. Enter the variable value (e.g., your API key) in the "Variable value" field. - 8. Click "OK" to save the changes. - 9. Close any open command prompt or application windows and reopen them for the changes to take effect. - -Now you can use `%OPENAI_API_KEY%` when prompted to input your key. - -### Run - -- Create an empty folder. 
If inside the repo, you can: - - Run `xcopy /E projects\example projects\my-new-project` in the command line - - Or hold CTRL and drag the folder down to create a copy, then rename to fit your project -- Fill in the `prompt` file in your new folder -- `gpt-engineer projects/my-new-project` - - (Note, `gpt-engineer --help` lets you see all available options. For example `--steps use_feedback` lets you improve/fix code in a project) - -By running gpt-engineer you agree to our [ToS](https://github.com/gpt-engineer-org/gpt-engineer/blob/main/TERMS_OF_USE.md). - -### Results - -- Check the generated files in `projects/my-new-project/workspace` diff --git a/app/action.tsx b/app/action.tsx new file mode 100644 index 0000000000..464bef06c1 --- /dev/null +++ b/app/action.tsx @@ -0,0 +1,88 @@ +"use server"; + +import { createAI, createStreamableValue } from 'ai/rsc'; +import { config } from './config'; +import { functionCalling } from './function-calling'; +import { getSearchResults, getImages, getVideos } from './tools/searchProviders'; +import { get10BlueLinksContents, processAndVectorizeContent } from './tools/contentProcessing'; +import { setInSemanticCache, clearSemanticCache, initializeSemanticCache, getFromSemanticCache } from './tools/semanticCache'; +import { relevantQuestions } from './tools/generateRelevantQuestions'; +import { streamingChatCompletion } from './tools/streamingChatCompletion'; +import { checkRateLimit } from './tools/rateLimiting'; +import { lookupTool } from './tools/mentionTools'; + +async function myAction(userMessage: string, mentionTool: string | null, logo: string | null, file: string): Promise { + "use server"; + const streamable = createStreamableValue({}); + + (async () => { + await checkRateLimit(streamable); + + await initializeSemanticCache(); + + const cachedData = await getFromSemanticCache(userMessage); + if (cachedData) { + streamable.update({ cachedData }); + return; + } + + if (mentionTool) { + await lookupTool(mentionTool, 
userMessage, streamable, file); + } + + const [images, sources, videos, conditionalFunctionCallUI] = await Promise.all([ + getImages(userMessage), + getSearchResults(userMessage), + getVideos(userMessage), + functionCalling(userMessage), + ]); + + streamable.update({ searchResults: sources, images, videos }); + + if (config.useFunctionCalling) { + streamable.update({ conditionalFunctionCallUI }); + } + + const html = await get10BlueLinksContents(sources); + const vectorResults = await processAndVectorizeContent(html, userMessage); + const accumulatedLLMResponse = await streamingChatCompletion(userMessage, vectorResults, streamable); + const followUp = await relevantQuestions(sources, userMessage); + + streamable.update({ followUp }); + + setInSemanticCache(userMessage, { + searchResults: sources, + images, + videos, + conditionalFunctionCallUI: config.useFunctionCalling ? conditionalFunctionCallUI : undefined, + llmResponse: accumulatedLLMResponse, + followUp, + semanticCacheKey: userMessage + }); + + streamable.done({ status: 'done' }); + })(); + + return streamable.value; +} + +const initialAIState: { + role: 'user' | 'assistant' | 'system' | 'function'; + content: string; + id?: string; + name?: string; +}[] = []; + +const initialUIState: { + id: number; + display: React.ReactNode; +}[] = []; + +export const AI = createAI({ + actions: { + myAction, + clearSemanticCache + }, + initialUIState, + initialAIState, +}); \ No newline at end of file diff --git a/app/config.tsx b/app/config.tsx new file mode 100644 index 0000000000..05fd6398d4 --- /dev/null +++ b/app/config.tsx @@ -0,0 +1,23 @@ +// - The below are going to be the default values, eventually this will move to a UI component so it can be easily changed by the user +// - To enable + use Ollama models, ensure inference and/or embeddings model are downloaded and ollama is running https://ollama.com/library +// - Icons within UI are not yet dynamic, to change currently, you must change the img src path in the 
UI component +// - IMPORTANT: when Ollama Embeddings + Ollama inference enabled at the same time, this can cause time-to-first-token to be quite long +// - IMPORTANT: Follow-up questions are not yet implemented with Ollama models, only OpenAI compatible models that use {type: "json_object"} + +export const config = { + useOllamaInference: false, + useOllamaEmbeddings: false, + searchProvider: 'serper', // 'serper', 'google' // 'serper' is the default + inferenceModel: 'llama-3.1-70b-versatile', // Groq: 'mixtral-8x7b-32768', 'gemma-7b-it' // OpenAI: 'gpt-3.5-turbo', 'gpt-4' // Ollama 'mistral', 'llama3' etc + inferenceAPIKey: process.env.GROQ_API_KEY, // Groq: process.env.GROQ_API_KEY // OpenAI: process.env.OPENAI_API_KEY // Ollama: 'ollama' is the default + embeddingsModel: 'text-embedding-3-small', // Ollama: 'llama2', 'nomic-embed-text' // OpenAI 'text-embedding-3-small', 'text-embedding-3-large' + textChunkSize: 800, // Recommended to decrease for Ollama + textChunkOverlap: 200, // Recommended to decrease for Ollama + numberOfSimilarityResults: 4, // Number of similarity results to return per page + numberOfPagesToScan: 10, // Recommended to decrease for Ollama + nonOllamaBaseURL: 'https://api.groq.com/openai/v1', //Groq: https://api.groq.com/openai/v1 // OpenAI: https://api.openai.com/v1 + useFunctionCalling: true, // Set to true to enable function calling and conditional streaming UI (currently in beta) + useRateLimiting: false, // Uses Upstash rate limiting to limit the number of requests per user + useSemanticCache: false, // Uses Upstash semantic cache to store and retrieve data for faster response times + usePortkey: false, // Uses Portkey for AI Gateway in @mentions (currently in beta) see config-tools.tsx to configure + mentionTools.tsx for source code +} diff --git a/app/function-calling.tsx b/app/function-calling.tsx new file mode 100644 index 0000000000..a81021ae09 --- /dev/null +++ b/app/function-calling.tsx @@ -0,0 +1,211 @@ +// @ts-nocheck +import
{ OpenAI } from 'openai'; +import { config } from './config'; +import { SpotifyApi } from "@spotify/web-api-ts-sdk"; + +const client = new OpenAI({ + baseURL: config.nonOllamaBaseURL, + apiKey: config.inferenceAPIKey +}); +const MODEL = config.inferenceModel; + +const api = SpotifyApi.withClientCredentials( + process.env.SPOTIFY_CLIENT_ID as string, + process.env.SPOTIFY_CLIENT_SECRET as string +); + +export async function searchPlaces(query: string, location: string) { + try { + const response = await fetch('https://google.serper.dev/places', { + method: 'POST', + headers: { + 'X-API-KEY': process.env.SERPER_API, + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ q: query, location: location }), + }); + const data = await response.json(); + const normalizedData = { + type: 'places', + places: data.places.map(place => ({ + position: place.position, + title: place.title, + address: place.address, + latitude: place.latitude, + longitude: place.longitude, + rating: place.rating, + ratingCount: place.ratingCount, + category: place.category, + phoneNumber: place.phoneNumber, + website: place.website, + cid: place.cid + })) + }; + return JSON.stringify(normalizedData); + } catch (error) { + console.error('Error searching for places:', error); + return JSON.stringify({ error: 'Failed to search for places' }); + } +} +export async function goShopping(message: string) { + const url = 'https://google.serper.dev/shopping'; + const requestOptions: RequestInit = { + method: 'POST', + headers: { + 'X-API-KEY': process.env.SERPER_API as string, + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ "q": message }) + }; + try { + const response = await fetch(url, requestOptions); + if (!response.ok) { + console.error(`Failed to fetch ${url}. 
Status: ${response.status}`); + } + const responseData = await response.json(); + const shoppingData = { + type: 'shopping', + shopping: responseData.shopping + }; + return JSON.stringify(shoppingData); + } catch (error) { + console.error('Error fetching shopping data:', error); + } +} +export async function getTickers(ticker: string) { + return JSON.stringify({ type: 'ticker', data: ticker }); +} +export async function searchSong(query: string): Promise { + const items = await api.search(query, ["track"]); + const track = items.tracks.items[0]; + if (track) { + const trackId = track.uri.replace('spotify:track:', ''); + return JSON.stringify({ trackId: trackId }); + } else { + return JSON.stringify({ error: "No matching song found." }); + } +} +export async function functionCalling(query: string) { + try { + const messages = [ + { role: "system", content: "You are a function calling agent. You will be given a query and a list of functions. Your task is to call the appropriate function based on the query and return the result in JSON format. 
ONLY CALL A FUNCTION IF YOU ARE HIGHLY CONFIDENT IT WILL BE USED" }, + { role: "user", content: query }, + ]; + const tools = [ + { + type: "function", + function: { + name: "getTickers", + description: "Get a single market name and stock ticker if the user mentions a public company", + parameters: { + type: "object", + properties: { + ticker: { + type: "string", + description: "The stock ticker symbol and market name, example NYSE:K or NASDAQ:AAPL", + }, + }, + required: ["ticker"], + }, + }, + }, + { + type: "function", + function: { + name: "searchPlaces", + description: "ONLY SEARCH for places using the given query and location", + parameters: { + type: "object", + properties: { + query: { + type: "string", + description: "The search query for places", + }, + location: { + type: "string", + description: "The location to search for places", + }, + }, + required: ["query", "location"], + }, + }, + }, + { + type: "function", + function: { + name: "goShopping", + description: "Search for shopping items using the given query", + parameters: { + type: "object", + properties: { + query: { + type: "string", + description: "The search query for shopping items", + }, + }, + required: ["query"], + }, + } + }, + { + type: "function", + function: { + name: "searchSong", + description: "Searches for a song on Spotify based on the provided search query and returns the track ID.", + parameters: { + type: "object", + properties: { + query: { + type: "string", + description: "The search query to find a song on Spotify, such as the song title or artist name.", + }, + }, + required: ["query"], + }, + }, + }, + ]; + const response = await client.chat.completions.create({ + model: MODEL, + messages: messages, + tools: tools, + tool_choice: "auto", + max_tokens: 4096, + }); + const responseMessage = response.choices[0].message; + const toolCalls = responseMessage.tool_calls; + if (toolCalls) { + const availableFunctions = { + getTickers: getTickers, + searchPlaces: searchPlaces, + 
goShopping: goShopping, + searchSong: searchSong, + }; + messages.push(responseMessage); + for (const toolCall of toolCalls) { + const functionName = toolCall.function.name; + const functionToCall = availableFunctions[functionName]; + const functionArgs = JSON.parse(toolCall.function.arguments); + let functionResponse; + try { + if (functionName === 'getTickers') { + functionResponse = await functionToCall(functionArgs.ticker); + } else if (functionName === 'searchPlaces') { + functionResponse = await functionToCall(functionArgs.query, functionArgs.location); + } else if (functionName === 'goShopping') { + functionResponse = await functionToCall(functionArgs.query); + } else if (functionName === 'searchSong') { + functionResponse = await functionToCall(functionArgs.query); + } + return JSON.parse(functionResponse); + } catch (error) { + console.error(`Error calling function ${functionName}:`, error); + return JSON.stringify({ error: `Failed to call function ${functionName}` }); + } + } + } + } catch (error) { + console.error('Error in functionCalling:', error); + return JSON.stringify({ error: 'An error occurred during function calling' }); + } +} \ No newline at end of file diff --git a/app/globals.css b/app/globals.css new file mode 100644 index 0000000000..ea4d868c34 --- /dev/null +++ b/app/globals.css @@ -0,0 +1,213 @@ +@tailwind base; +@tailwind components; +@tailwind utilities; + +@layer base { + :root { + --background: 0 0% 100%; + --foreground: 240 10% 3.9%; + --muted: 240 4.8% 95.9%; + --muted-foreground: 240 3.8% 46.1%; + --popover: 0 0% 100%; + --popover-foreground: 240 10% 3.9%; + --card: 0 0% 100%; + --card-foreground: 240 10% 3.9%; + --border: 240 5.9% 90%; + --input: 240 5.9% 90%; + --primary: 240 5.9% 10%; + --primary-foreground: 0 0% 98%; + --secondary: 240 4.8% 95.9%; + --secondary-foreground: 240 5.9% 10%; + --accent: 240 4.8% 95.9%; + --accent-foreground: ; + --destructive: 0 84.2% 60.2%; + --destructive-foreground: 0 0% 98%; + --ring: 240 5% 
64.9%; + --radius: 0.5rem; + } + + .dark { + --background: 240 10% 3.9%; + --foreground: 0 0% 98%; + --muted: 240 3.7% 15.9%; + --muted-foreground: 240 5% 64.9%; + --popover: 240 10% 3.9%; + --popover-foreground: 0 0% 98%; + --card: 240 10% 3.9%; + --card-foreground: 0 0% 98%; + --border: 240 3.7% 15.9%; + --input: 240 3.7% 15.9%; + --primary: 0 0% 98%; + --primary-foreground: 240 5.9% 10%; + --secondary: 240 3.7% 15.9%; + --secondary-foreground: 0 0% 98%; + --accent: 240 3.7% 15.9%; + --accent-foreground: ; + --destructive: 0 62.8% 30.6%; + --destructive-foreground: 0 85.7% 97.3%; + --ring: 240 3.7% 15.9%; + } +} + +@layer base { + * { + @apply border-border; + } + + body { + @apply bg-background text-foreground; + } +} + +.keyboard-visible { + transform: translateY(-100%); + transition: transform 0.3s ease-in-out; +} + +.leaflet-popup-content p { + margin: 0 !important; +} + +/* leaflet made me do it.. */ +.bring-to-front { + z-index: 99999 !important; +} + +.bring-to-front-modal { + z-index: 99999999 !important; +} + +.rate-limit-modal { + z-index: 999999999999 !important; +} + +/* tradingview mobile */ +.my-5.tradingview-widget-container { + height: 150px !important; +} + +/* Markdown styles */ +/* Markdown styles */ +.markdown-container { + font-family: Arial, sans-serif; + line-height: 1.6; + color: #333; + font-size: 16px; +} + +.markdown-container h1, +.markdown-container h2, +.markdown-container h3, +.markdown-container h4, +.markdown-container h5, +.markdown-container h6 { + font-weight: bold; + margin-top: 1em; + margin-bottom: 0.5em; + border-bottom: 1px solid #ddd; + padding-bottom: 0.3em; +} + +.markdown-container h1 { + font-size: 1.5em; +} + +.markdown-container h2 { + font-size: 1.4em; +} + +.markdown-container h3 { + font-size: 1.3em; +} + +.markdown-container h4 { + font-size: 1.2em; +} + +.markdown-container h5 { + font-size: 1.1em; +} + +.markdown-container h6 { + font-size: 1em; +} + +.markdown-container p { + margin-bottom: 1em; +} + 
+.markdown-container strong { + font-weight: bold; +} + +.markdown-container em { + font-style: italic; +} + +.markdown-container a { + color: #000; + text-decoration: underline; +} + +.markdown-container code { + font-family: monospace; + font-size: 0.9em; + padding: 0.2em 0.4em; + background-color: #f6f8fa; + border-radius: 3px; +} + +.markdown-container pre { + font-family: monospace; + font-size: 0.9em; + padding: 1em; + overflow: auto; + background-color: #f6f8fa; + border-radius: 3px; +} + +.markdown-container pre code { + padding: 0; + background-color: transparent; +} + +.markdown-container ul, +.markdown-container ol { + margin-bottom: 1em; + padding-left: 2em; +} + +.markdown-container ul li, +.markdown-container ol li { + margin-bottom: 0.5em; +} + +.markdown-container ul li { + list-style-type: disc; +} + +.markdown-container ol li { + list-style-type: decimal; +} + +.markdown-container blockquote { + margin: 1em 0; + padding: 0.5em 1em; + border-left: 4px solid #ddd; + color: #666; +} + +.markdown-container hr { + border: none; + border-top: 1px solid #ddd; + margin: 1.5em 0; +} + +.fixed.inset-0.z-50.bg-black\/80.data-\[state\=open\]\:animate-in.data-\[state\=closed\]\:animate-out.data-\[state\=closed\]\:fade-out-0.data-\[state\=open\]\:fade-in-0 { + z-index: 99999; +} + +.clip-yt-img { + margin-top: -24px; + clip-path: inset(30px 0 30px 0); +} \ No newline at end of file diff --git a/app/layout.tsx b/app/layout.tsx new file mode 100644 index 0000000000..e29d93ab0d --- /dev/null +++ b/app/layout.tsx @@ -0,0 +1,80 @@ +import type { Metadata } from 'next'; +import { GeistMono } from 'geist/font/mono'; +import { GeistSans } from 'geist/font/sans'; +import { Analytics } from '@vercel/analytics/react'; +import { Toaster } from '@/components/ui/toaster'; +import './globals.css'; + +import { AI } from './action'; +import { Header } from '@/components/header'; +import { Providers } from '@/components/providers'; + +const meta = { + title: 'answers, how they 
should be displayed.', + description: + 'answer engine built by developers digest', +}; +export const metadata: Metadata = { + ...meta, + title: { + default: 'answer website', + template: `%s - answer website`, + }, + icons: { + icon: '/favicon.ico', + shortcut: '/favicon-16x16.png', + apple: '/apple-touch-icon.png', + }, + twitter: { + ...meta, + card: 'summary_large_image', + site: '@vercel', + }, + openGraph: { + ...meta, + locale: 'en-US', + type: 'website', + }, +}; + +export const viewport = { + themeColor: [ + { media: '(prefers-color-scheme: light)', color: 'white' }, + { media: '(prefers-color-scheme: dark)', color: 'black' }, + ], +}; + +export default function RootLayout({ + children, +}: Readonly<{ + children: React.ReactNode; +}>) { + return ( + + + + + +
+
+
+ {children} +
+
+
+
+ + + + ); +} + +export const runtime = 'edge'; diff --git a/app/page.tsx b/app/page.tsx new file mode 100644 index 0000000000..429adaa177 --- /dev/null +++ b/app/page.tsx @@ -0,0 +1,515 @@ +'use client'; +// 1. Import Dependencies +import { FormEvent, useEffect, useRef, useState, useCallback } from 'react'; +import { useActions, readStreamableValue } from 'ai/rsc'; +import { type AI } from './action'; +import { ChatScrollAnchor } from '@/lib/hooks/chat-scroll-anchor'; +import Textarea from 'react-textarea-autosize'; +import { useEnterSubmit } from '@/lib/hooks/use-enter-submit'; +import { Tooltip, TooltipContent, TooltipTrigger, } from '@/components/ui/tooltip'; +import { Button } from '@/components/ui/button'; +import dynamic from 'next/dynamic'; +// Main components +import SearchResultsComponent from '@/components/answer/SearchResultsComponent'; +import UserMessageComponent from '@/components/answer/UserMessageComponent'; +import FollowUpComponent from '@/components/answer/FollowUpComponent'; +import InitialQueries from '@/components/answer/InitialQueries'; +// Sidebar components +import LLMResponseComponent from '@/components/answer/LLMResponseComponent'; +import ImagesComponent from '@/components/answer/ImagesComponent'; +import VideosComponent from '@/components/answer/VideosComponent'; +// Function calling components +const MapComponent = dynamic(() => import('@/components/answer/Map'), { ssr: false, }); +import MapDetails from '@/components/answer/MapDetails'; +import ShoppingComponent from '@/components/answer/ShoppingComponent'; +import FinancialChart from '@/components/answer/FinancialChart'; +import Spotify from '@/components/answer/Spotify'; +import ImageGenerationComponent from '@/components/answer/ImageGenerationComponent'; +import { ArrowUp, Paperclip } from '@phosphor-icons/react'; +// OPTIONAL: Use Upstash rate limiting to limit the number of requests per user +import RateLimit from '@/components/answer/RateLimit'; +import { mentionToolConfig } 
from './tools/mentionToolConfig'; +// 2. Set up types +interface SearchResult { + favicon: string; + link: string; + title: string; +} +interface Message { + falBase64Image: any; + logo: string | undefined; + semanticCacheKey: any; + cachedData: string; + id: number; + type: string; + content: string; + userMessage: string; + images: Image[]; + videos: Video[]; + followUp: FollowUp | null; + isStreaming: boolean; + searchResults?: SearchResult[]; + conditionalFunctionCallUI?: any; + status?: string; + places?: Place[]; + shopping?: Shopping[]; + ticker?: string | undefined; + spotify?: string | undefined; + isolatedView: boolean; + +} +interface StreamMessage { + isolatedView: any; + searchResults?: any; + userMessage?: string; + llmResponse?: string; + llmResponseEnd?: boolean; + images?: any; + videos?: any; + followUp?: any; + conditionalFunctionCallUI?: any; + status?: string; + places?: Place[]; + shopping?: Shopping[]; + ticker?: string; + spotify?: string; + cachedData?: string; + semanticCacheKey?: any; + falBase64Image?: any; +} +interface Image { + link: string; +} +interface Video { + link: string; + imageUrl: string; +} +interface Place { + cid: React.Key | null | undefined; + latitude: number; + longitude: number; + title: string; + address: string; + rating: number; + category: string; + phoneNumber?: string; + website?: string; +} +interface FollowUp { + choices: { + message: { + content: string; + }; + }[]; +} +interface Shopping { + type: string; + title: string; + source: string; + link: string; + price: string; + shopping: any; + position: number; + delivery: string; + imageUrl: string; + rating: number; + ratingCount: number; + offers: string; + productId: string; +} + +const mentionTools = mentionToolConfig.useMentionQueries ? 
mentionToolConfig.mentionTools : []; + +export default function Page() { + const [file, setFile] = useState(''); + const [mentionQuery, setMentionQuery] = useState(''); + const [selectedMentionTool, setSelectedMentionTool] = useState(null); + const [selectedMentionToolLogo, setSelectedMentionToolLogo] = useState(null); + const [showRAG, setShowRAG] = useState(false); + // 3. Set up action that will be used to stream all the messages + const { myAction } = useActions(); + // 4. Set up form submission handling + const { formRef, onKeyDown } = useEnterSubmit(); + const inputRef = useRef(null); + const [inputValue, setInputValue] = useState(''); + // 5. Set up state for the messages + const [messages, setMessages] = useState([]); + // 6. Set up state for the CURRENT LLM response (for displaying in the UI while streaming) + const [currentLlmResponse, setCurrentLlmResponse] = useState(''); + // 7. Set up handler for when the user clicks on the follow up button + const handleFollowUpClick = useCallback(async (question: string) => { + setCurrentLlmResponse(''); + await handleUserMessageSubmission({ message: question, mentionTool: null, logo: null, file: file }); + }, []); + + // 8. For the form submission, we need to set up a handler that will be called when the user submits the form + useEffect(() => { + const handleKeyDown = (e: KeyboardEvent) => { + if (e.key === '/') { + if ( + e.target && + ['INPUT', 'TEXTAREA'].includes((e.target as HTMLElement).nodeName) + ) { + return; + } + e.preventDefault(); + e.stopPropagation(); + if (inputRef?.current) { + inputRef.current.focus(); + } + } + }; + document.addEventListener('keydown', handleKeyDown); + return () => { + document.removeEventListener('keydown', handleKeyDown); + }; + }, [inputRef]); + // 9. 
Set up handler for when a submission is made, which will call the myAction function + const handleSubmit = async (payload: { message: string; mentionTool: string | null, logo: string | null, file: string }) => { + if (!payload.message) return; + await handleUserMessageSubmission(payload); + }; + const handleFormSubmit = async (e: React.FormEvent): Promise => { + e.preventDefault(); + if (!inputValue.trim()) return; + setInputValue(''); + + const payload = { + message: inputValue.trim(), + mentionTool: selectedMentionTool, + logo: selectedMentionToolLogo, + file: file, + }; + await handleSubmit(payload); + setShowRAG(false); + setSelectedMentionTool(null); + setSelectedMentionToolLogo(null); + setFile(''); + + }; + const handleUserMessageSubmission = async (payload: { + logo: any; message: string; mentionTool: string | null, file: string + }): Promise => { + const newMessageId = Date.now(); + const newMessage = { + id: newMessageId, + type: 'userMessage', + userMessage: payload.message, + mentionTool: payload.mentionTool, + file: payload.file, + logo: payload.logo, + content: '', + images: [], + videos: [], + followUp: null, + isStreaming: true, + searchResults: [] as SearchResult[], + places: [] as Place[], + shopping: [] as Shopping[], + status: '', + ticker: undefined, + spotify: undefined, + semanticCacheKey: null, + cachedData: '', + isolatedView: !!payload.mentionTool, // Set isolatedView based on mentionTool + falBase64Image: null, + }; + setMessages(prevMessages => [...prevMessages, newMessage]); + let lastAppendedResponse = ""; + try { + const streamableValue = await myAction(payload.message, payload.mentionTool, payload.logo, payload.file); + + let llmResponseString = ""; + for await (const message of readStreamableValue(streamableValue)) { + const typedMessage = message as StreamMessage; + setMessages((prevMessages) => { + const messagesCopy = [...prevMessages]; + const messageIndex = messagesCopy.findIndex(msg => msg.id === newMessageId); + if 
(messageIndex !== -1) { + const currentMessage = messagesCopy[messageIndex]; + + currentMessage.status = typedMessage.status === 'rateLimitReached' ? 'rateLimitReached' : currentMessage.status; + + if (typedMessage.isolatedView) { + currentMessage.isolatedView = true; + } + + if (typedMessage.llmResponse && typedMessage.llmResponse !== lastAppendedResponse) { + currentMessage.content += typedMessage.llmResponse; + lastAppendedResponse = typedMessage.llmResponse; + } + + currentMessage.isStreaming = typedMessage.llmResponseEnd ? false : currentMessage.isStreaming; + currentMessage.searchResults = typedMessage.searchResults || currentMessage.searchResults; + currentMessage.images = typedMessage.images ? [...typedMessage.images] : currentMessage.images; + currentMessage.videos = typedMessage.videos ? [...typedMessage.videos] : currentMessage.videos; + currentMessage.followUp = typedMessage.followUp || currentMessage.followUp; + currentMessage.semanticCacheKey = messagesCopy[messageIndex]; + currentMessage.falBase64Image = typedMessage.falBase64Image; + + + if (typedMessage.conditionalFunctionCallUI) { + const functionCall = typedMessage.conditionalFunctionCallUI; + if (functionCall.type === 'places') currentMessage.places = functionCall.places; + if (functionCall.type === 'shopping') currentMessage.shopping = functionCall.shopping; + if (functionCall.type === 'ticker') currentMessage.ticker = functionCall.data; + if (functionCall.trackId) currentMessage.spotify = functionCall.trackId; + } + + if (typedMessage.cachedData) { + const data = JSON.parse(typedMessage.cachedData); + currentMessage.searchResults = data.searchResults; + currentMessage.images = data.images; + currentMessage.videos = data.videos; + currentMessage.content = data.llmResponse; + currentMessage.isStreaming = false; + currentMessage.semanticCacheKey = data.semanticCacheKey; + currentMessage.conditionalFunctionCallUI = data.conditionalFunctionCallUI; + currentMessage.followUp = data.followUp; + + + if 
(data.conditionalFunctionCallUI) { + const functionCall = data.conditionalFunctionCallUI; + if (functionCall.type === 'places') currentMessage.places = functionCall.places; + if (functionCall.type === 'shopping') currentMessage.shopping = functionCall.shopping; + if (functionCall.type === 'ticker') currentMessage.ticker = functionCall.data; + if (functionCall.trackId) currentMessage.spotify = functionCall.trackId; + } + } + } + return messagesCopy; + }); + if (typedMessage.llmResponse) { + llmResponseString += typedMessage.llmResponse; + setCurrentLlmResponse(llmResponseString); + } + } + } catch (error) { + console.error("Error streaming data for user message:", error); + } + }; + const handleFileUpload = (file: File) => { + console.log('file', file); + // file reader to read the file and set the file state + const fileReader = new FileReader(); + fileReader.onload = (e) => { + const base64File = e.target?.result; + if (base64File) { + console.log('base64File', base64File); + setFile(String(base64File)); + } + }; + fileReader.readAsDataURL(file) + + }; + return ( +
+ {messages.length > 0 && ( +
+ {messages.map((message, index) => ( +
+ {message.isolatedView ? ( + selectedMentionTool === 'fal-ai/stable-diffusion-v3-medium' + || message.falBase64Image + ? ( + + ) : ( + + ) + ) : ( + // Render regular view +
+
+ {message.status && message.status === 'rateLimitReached' && } + {message.type === 'userMessage' && } + {message.ticker && message.ticker.length > 0 && ( + + )} + {message.spotify && message.spotify.length > 0 && ( + + )} + {message.searchResults && ()} + {message.places && message.places.length > 0 && ( + + )} + + {message.followUp && ( +
+ +
+ )} +
+
+ + {message.shopping && message.shopping.length > 0 && } + {message.images && } + {message.videos && } + {message.places && message.places.length > 0 && ( + + )} + {message.falBase64Image && } +
+
+ )} +
+ ))} +
+ )} +
+
+ {messages.length === 0 && !inputValue && ( + + )} + {mentionQuery && ( +
+
+
+
    + {mentionTools + .filter((tool) => + tool.name.toLowerCase().includes(mentionQuery.toLowerCase()) + ) + .map((tool) => ( +
  • { + setSelectedMentionTool(tool.id); + setSelectedMentionToolLogo(tool.logo); + tool.enableRAG && setShowRAG(true); + setMentionQuery(""); + setInputValue(" "); // Update the input value with a single blank space + }} + > + {tool.logo ? + {tool.name} : + + + + + + } + + +

    + @{tool.name} +

    +
  • + ))} +
+
+ )} +
) => { + e.preventDefault(); + handleFormSubmit(e); + setCurrentLlmResponse(''); + if (window.innerWidth < 600) { + (e.target as HTMLFormElement)['message']?.blur(); + } + const value = inputValue.trim(); + setInputValue(''); + if (!value) return; + }} + > +
+ {selectedMentionToolLogo && ( + + )} + {showRAG && ( + <> + {/* increase size on hover */} + + { + const file = e.target.files?.[0]; + if (file) { + handleFileUpload(file); + } + }} + /> + + )} +