diff --git a/README.md b/README.md
index 063c9d2..e50386f 100644
--- a/README.md
+++ b/README.md
@@ -1,20 +1,119 @@
-# Ollama Bash Eval
+# 🐚 Ollama Bash Eval (`oe`)
 
-![Release](https://img.shields.io/github/v/release/attogram/ollama-bash-eval?style=flat)
-![License](https://img.shields.io/github/license/attogram/ollama-bash-eval?style=flat)
-![Bash ≥3.2](https://img.shields.io/badge/bash-%3E=3.2-blue?style=flat)
-![GitHub commit activity](https://img.shields.io/github/commit-activity/t/attogram/ollama-bash-eval?style=flat)
-![GitHub stars](https://img.shields.io/github/stars/attogram/ollama-bash-eval?style=flat)
-![GitHub watchers](https://img.shields.io/github/watchers/attogram/ollama-bash-eval?style=flat)
-![Forks](https://img.shields.io/github/forks/attogram/ollama-bash-eval?style=flat)
-![Issues](https://img.shields.io/github/issues/attogram/ollama-bash-eval?style=flat)
+**`oe`** is a minimal AI-powered Bash CLI tool that uses local Ollama models to generate and explain Bash one-liners, or to create files such as HTML and Markdown.
 
-The `oe` command uses local LLMs via Ollama to translate your natural language requests into executable shell commands.
+Built to be:
+- POSIX-friendly
+- Bash 3.2 compatible
+- Shell-native (no config files)
+- Model-flexible
+- Pipe-friendly
 
-For safety, it always shows you the generated command and asks for your approval before running anything.
+---
 
-Stop forgetting the right flags for tar or git, just ask!
+## 🧠 Example Usage
 
-## Library
+```bash
+oe find all files over 10GB
+oe -x how to show running processes
+oe -c make a markdown file about SSH > ssh.md
+oe -m llama3 list open ports
+oe -m phi3 -x how to tail nginx logs
+```
 
-`oe` uses a subset of the [Ollama Bash Lib](https://github.com/attogram/ollama-bash-lib) to interact with Ollama.
+---
+
+## 🧰 Options
+
+| Flag         | Description                                                       |
+|--------------|-------------------------------------------------------------------|
+| `-m <model>` | Use a specific model and set `OE_MODEL`                           |
+| `-x`         | Add an explanation (as a `#` Bash comment **before** the command) |
+| `-c`         | Create a file (e.g. HTML or Markdown)                             |
+| `-t <task>`  | Provide the task as an explicit option argument                   |
+
+---
+
+## 🧠 Model Behavior
+
+- `-m <model>` sets the model **for this call** and **updates `OE_MODEL`** for the remainder of the run
+- If `-x` is also used, prints:
+  ```bash
+  Using model: llama3
+  To make this your default: export OE_MODEL=llama3
+  ```
+- If no `-m` is given, uses `$OE_MODEL`, or falls back to a random locally installed model (see the sketch below)
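+
+A minimal sketch of that fallback order (illustrative only; the real logic lives in `oe()`, and `ollama_model_random` comes from Ollama Bash Lib):
+
+```bash
+if [[ -n "$model" ]]; then        # -m wins
+  model_to_use="$model"
+elif [[ -n "$OE_MODEL" ]]; then   # then the exported default
+  model_to_use="$OE_MODEL"
+else                              # otherwise any locally installed model
+  model_to_use="$(ollama_model_random)"
+fi
+```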
+
+---
+
+## 🪄 Example Prompts Used
+
+### 🔹 One-liner generation (`oe`)
+
+> Generate a safe, POSIX-compliant Bash one-liner.
+> Task: `{{TASK}}`.
+> Return only the command.
+
+---
+
+### 🔹 One-liner with explanation (`oe -x`)
+
+> Generate a safe, POSIX-compliant Bash one-liner.
+> Task: `{{TASK}}`.
+> Add a short explanation as a Bash comment (`# comment`) **before** the command.
+> Output only the comment and command.
+
+---
+
+### 🔹 File creation (`oe -c`)
+
+> Create a plain text file based on this description:
+> `{{TASK}}`
+> Return only the raw content of the file.
+> Do not include explanations or formatting outside the file content.
+
+---
+
+## 🛡️ LLM Output Filtering
+
+- Handles imperfect LLM output (chattiness, markdown, etc.)
+- Strips markdown code blocks (see the sketch below)
+- Extracts:
+  - The first valid command
+  - The first `#` explanation (if `-x` is used)
+- Ignores intros like:
+  - `"Sure! Here's a bash command:"`
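+
+The heart of the filter is roughly this pipeline (a simplified sketch of the `_filter_output` helper inside `oe`; `$llm_output` is a placeholder variable name):
+
+````bash
+# Drop markdown fences, trim whitespace, keep the first non-comment, non-empty line
+printf '%s\n' "$llm_output" \
+  | sed -e 's/^```bash//' -e 's/^```//' -e 's/```$//' \
+  | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' \
+  | grep -v -e '^#' -e '^$' | head -n 1
+````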
- printf ' โŒ Invalid Bash Syntax (code %s)\n%s\n' "$rc" "$errors" - return 1 - fi - printf ' โœ… Valid Bash Syntax (checked without timeout)\n' - return 0 -} - -_ollama_eval_check_danger() { - local cmd="$1" - local dangerous=( - 'rm' 'mv' 'dd' 'mkfs' 'shred' 'shutdown' 'reboot' 'init' 'kill' 'pkill' 'killall' - 'umount' 'mount' 'userdel' 'groupdel' 'passwd' 'su' 'sudo' 'systemctl' - 'bash' '/bin/sh' '-delete' 'exec' 'eval' 'source' '\.' - ) - local IFS='|' - local danger_regex="(^|[^[:alnum:]_])(${dangerous[*]})($|[^[:alnum:]_])" - if [[ "$cmd" =~ $danger_regex ]]; then - local bad="${BASH_REMATCH[2]}" - printf ' โš ๏ธ WARNING: The generated command contains a potentially dangerous token: "%s"\n' "$bad" - return 1 - fi - printf ' โœ… No dangerous commands found\n' - return 0 -} - -# Returns: 0 on Sandbox run, 1 on Abort, 2 on Request for dangerous mode -_ollama_eval_permission_sandbox() { - local cmd="$1" - printf '\nRun command in sandbox (y/N/eval)? ' - read -r permission - case "$permission" in - y|Y) - _debug "ollama_eval: sandboxed eval cmd: [${cmd:0:240}]" - echo - printf 'Running command in a sandboxed environment...\n\n' - env -i PATH="/bin:/usr/bin" bash -r -c "$cmd" - return 0 # ran in sandbox - ;; - eval|EVAL) - _debug 'eval here' - return 2 # request to run in dangerous mode - ;; - esac - return 1 # user aborted -} - -_ollama_eval_permission_eval() { - local cmd="$1" - printf '\nAre you sure you want to use the DANGEROUS eval mode? [y/N] ' - read -r permission - case "$permission" in - y|Y) - _debug "ollama_eval: dangerous eval cmd: [${cmd:0:240}]" - printf '\nRunning command in DANGEROUS eval mode...\n\n' - eval "$cmd" - return 0 # command was run in dangerous mode - ;; - esac - return 1 # user aborted } -# Command Line Eval -# -# Usage: oe -t [-m ] -# Input: 1 - The task to be run on the command line -# Input: 2 - Model to use to generate command (Optional) If empty, uses random model -# Output: prompts user for permission, then runs command -# Requires: none -# Returns: 0 on success, 1 or higher on error oe() { - local usage='Usage: ollama_eval -t [-m [-h] [-v]' - local description - description=$(cat <<'EOF' -Generate and evaluate a command-line task. - - -t The task to be run on the command line. - -m Model to use to generate command (Optional) If empty, uses random model. - -h Show this help and exit. - -v Show version information and exit. - -This function takes a description of a task, sends it to a model to generate a shell command, and then prompts the user for permission to execute it. -It includes safety features like syntax checking and a sandbox mode for execution. This is a powerful tool for converting natural language into shell commands. -EOF -) - local task= model= - while getopts ":t:m:hv" opt; do + local model explain create_file task + model="" + explain=0 + create_file=0 + task="" + local OPTIND + while getopts ":m:xct:" opt; do case $opt in - t) task=$OPTARG ;; - m) model=$OPTARG ;; - h) printf '%s\n\n%s\n' "$usage" "$description"; return 0 ;; - v) printf 'ollama_eval version %s\n' "$OLLAMA_LIB_VERSION"; return 0 ;; - \?) printf 'Error: unknown option -%s\n\n' "$OPTARG" >&2 - printf '%s\n' "$usage" >&2; return 2 ;; - :) printf 'Error: -%s requires an argument\n\n' "$OPTARG" >&2 - printf '%s\n' "$usage" >&2; return 2 ;; + m) + model=$OPTARG + ;; + x) + explain=1 + ;; + c) + create_file=1 + ;; + t) + task=$OPTARG + ;; + \?) 
+
+---
+
+## 🧪 Sample Output
+
+```bash
+$ oe -x how to list biggest files
+
+# This lists the 10 largest files and directories in the current folder
+du -ah . | sort -rh | head -n 10
+```
diff --git a/oe b/oe
index 2789b0c..5dcec24 100755
--- a/oe
+++ b/oe
@@ -977,233 +977,128 @@
 EOF
 
 # -- End Ollama Bash Lib functions --------------------------------------
 
-_ollama_eval_prompt() {
-  local task="$1"
-  if [[ -z "$task" ]]; then
-    _error 'ollama_eval: Task Not Found. Usage: oe "task" "model"'
-    return 1
-  fi
-
-  _eval_model="$(_is_valid_model "$2")"
-  if [[ -z "$_eval_model" ]]; then
-    _error 'ollama_eval: No Models Found'
-    return 1
-  fi
-
-  _eval_prompt='Write a bash one-liner to do the following task:\n\n'
-  _eval_prompt+="$task\n\n"
-  _eval_prompt+="You are on a $(uname -s) system, with bash version ${BASH_VERSION:-$(bash --version | head -n1)}.\n"
-  _eval_prompt+="If you can not do the task but you can instruct the user how to do it, then reply with an 'echo' command with your instructions.\n"
-  _eval_prompt+="If you can not do the task for any other reason, then reply with an 'echo' command with your reason.\n"
-  _eval_prompt+="Reply ONLY with the ready-to-run bash one-liner.\n"
-  _eval_prompt+='Do NOT add any commentary, description, markdown formatting or anything extraneous.\n'
-}
-
-_ollama_eval_check_sanity() {
-  local cmd="$1"
-  local first_word
-  read -r first_word _ <<<"$cmd"
-  #if [[ "$first_word" =~ ^[[:space:]]*[a-zA-Z_][a-zA-Z0-9_]*\(\) ]]; then
-  if [[ "$first_word" =~ ^[a-zA-Z_][a-zA-Z0-9_]*\(\) ]]; then
-    printf ' ✅ Valid start: function definition OK: %s\n' "$first_word"
-    return 0
-  fi
-  if [[ "$first_word" =~ ^[a-zA-Z_][a-zA-Z0-9_]*= ]]; then
-    printf ' ✅ Valid start: variable assignment OK: %s\n' "$first_word"
-    return 0
-  fi
-  if _exists "$first_word"; then
-    printf ' ✅ Valid start: %s\n' "$first_word"
-    return 0
-  fi
-  printf ' ❌ Invalid start: %s\n' "$first_word"
-  return 1
-}
-
-_ollama_eval_check_syntax() {
-  local cmd="$1"
-  local errors
-  if _exists 'timeout'; then
-    if ! errors=$(timeout 1 bash -n <<<"$cmd" 2>&1); then
-      local rc=$?
-      printf ' ❌ Invalid Bash Syntax (code %s)\n%s\n' "$rc" "$errors"
-      return 1
-    fi
-    printf ' ✅ Valid Bash Syntax\n'
-    return 0
-  fi
-
-  # TODO - if no timeout available, use bash subshell + timer subshell
-  _debug 'ollama_eval: timeout command not found'
-  if ! errors=$(bash -n <<<"$cmd" 2>&1); then
-    local rc=$?
-    printf ' ❌ Invalid Bash Syntax (code %s)\n%s\n' "$rc" "$errors"
-    return 1
-  fi
-  printf ' ✅ Valid Bash Syntax (checked without timeout)\n'
-  return 0
-}
+_filter_output() {
+  local input="$1"
+  local explain_mode="$2"
+
+  # Strip markdown code fences and trim leading/trailing whitespace
+  local clean_input
+  clean_input=$(printf '%s\n' "$input" | sed -e 's/^```bash//' -e 's/^```//' -e 's/```$//' | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')
+
+  # The first non-comment, non-empty line is the command
+  # (also skip blank lines left behind by stripped fences)
+  local cmd
+  cmd=$(printf '%s' "$clean_input" | grep -v -e '^#' -e '^$' | head -n 1)
+
+  if (( explain_mode )); then
+    # Also emit the first "#" line as the explanation
+    local explanation
+    explanation=$(printf '%s' "$clean_input" | grep '^#' | head -n 1)
+    printf '%s\n' "$explanation"
+    printf '%s\n' "$cmd"
+  else
+    printf '%s\n' "$cmd"
+  fi
+}
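+
+# Illustrative usage (internal helper; "$raw_response" is a stand-in name):
+#   _filter_output "$raw_response" 1   # prints the "# explanation" line, then the command
+#   _filter_output "$raw_response" 0   # prints only the command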
-
-_ollama_eval_check_danger() {
-  local cmd="$1"
-  local dangerous=(
-    'rm' 'mv' 'dd' 'mkfs' 'shred' 'shutdown' 'reboot' 'init' 'kill' 'pkill' 'killall'
-    'umount' 'mount' 'userdel' 'groupdel' 'passwd' 'su' 'sudo' 'systemctl'
-    'bash' '/bin/sh' '-delete' 'exec' 'eval' 'source' '\.'
-  )
-  local IFS='|'
-  local danger_regex="(^|[^[:alnum:]_])(${dangerous[*]})($|[^[:alnum:]_])"
-  if [[ "$cmd" =~ $danger_regex ]]; then
-    local bad="${BASH_REMATCH[2]}"
-    printf ' ⚠️ WARNING: The generated command contains a potentially dangerous token: "%s"\n' "$bad"
-    return 1
-  fi
-  printf ' ✅ No dangerous commands found\n'
-  return 0
-}
-
-# Returns: 0 on Sandbox run, 1 on Abort, 2 on Request for dangerous mode
-_ollama_eval_permission_sandbox() {
-  local cmd="$1"
-  printf '\nRun command in sandbox (y/N/eval)? '
-  read -r permission
-  case "$permission" in
-    y|Y)
-      _debug "ollama_eval: sandboxed eval cmd: [${cmd:0:240}]"
-      echo
-      printf 'Running command in a sandboxed environment...\n\n'
-      env -i PATH="/bin:/usr/bin" bash -r -c "$cmd"
-      return 0 # ran in sandbox
-      ;;
-    eval|EVAL)
-      _debug 'eval here'
-      return 2 # request to run in dangerous mode
-      ;;
-  esac
-  return 1 # user aborted
-}
-
-_ollama_eval_permission_eval() {
-  local cmd="$1"
-  printf '\nAre you sure you want to use the DANGEROUS eval mode? [y/N] '
-  read -r permission
-  case "$permission" in
-    y|Y)
-      _debug "ollama_eval: dangerous eval cmd: [${cmd:0:240}]"
-      printf '\nRunning command in DANGEROUS eval mode...\n\n'
-      eval "$cmd"
-      return 0 # command was run in dangerous mode
-      ;;
-  esac
-  return 1 # user aborted
-}
-
-# Command Line Eval
-#
-# Usage: oe -t <task> [-m <model>]
-# Input: 1 - The task to be run on the command line
-# Input: 2 - Model to use to generate command (Optional) If empty, uses random model
-# Output: prompts user for permission, then runs command
-# Requires: none
-# Returns: 0 on success, 1 or higher on error
+
 oe() {
-  local usage='Usage: ollama_eval -t <task> [-m <model>] [-h] [-v]'
-  local description
-  description=$(cat <<'EOF'
-Generate and evaluate a command-line task.
-
-  -t <task>   The task to be run on the command line.
-  -m <model>  Model to use to generate command (Optional) If empty, uses random model.
-  -h          Show this help and exit.
-  -v          Show version information and exit.
-
-This function takes a description of a task, sends it to a model to generate a shell command, and then prompts the user for permission to execute it.
-It includes safety features like syntax checking and a sandbox mode for execution. This is a powerful tool for converting natural language into shell commands.
-EOF
-)
-  local task= model=
-  while getopts ":t:m:hv" opt; do
+  local model explain create_file task
+  model=""
+  explain=0
+  create_file=0
+  task=""
+  local OPTIND
+  while getopts ":m:xct:" opt; do
     case $opt in
-      t) task=$OPTARG ;;
-      m) model=$OPTARG ;;
-      h) printf '%s\n\n%s\n' "$usage" "$description"; return 0 ;;
-      v) printf 'ollama_eval version %s\n' "$OLLAMA_LIB_VERSION"; return 0 ;;
-      \?) printf 'Error: unknown option -%s\n\n' "$OPTARG" >&2
-          printf '%s\n' "$usage" >&2; return 2 ;;
-      :) printf 'Error: -%s requires an argument\n\n' "$OPTARG" >&2
-         printf '%s\n' "$usage" >&2; return 2 ;;
+      m)
+        model=$OPTARG
+        ;;
+      x)
+        explain=1
+        ;;
+      c)
+        create_file=1
+        ;;
+      t)
+        task=$OPTARG
+        ;;
+      \?)
+        printf 'Error: unknown option -%s\n\n' "$OPTARG" >&2
+        # TODO: print usage
+        return 2
+        ;;
+      :)
+        printf 'Error: -%s requires an argument\n\n' "$OPTARG" >&2
+        # TODO: print usage
+        return 2
+        ;;
     esac
   done
   shift $((OPTIND-1))
 
-  if [ -z "$task" ]; then
-    printf 'Error: -t is required\n\n' >&2
-    printf '%s\n' "$usage" >&2
-    return 2
+  # Fall back to the remaining positional arguments as the task
+  if [[ -z "$task" ]]; then
+    task="$*"
   fi
 
-  _debug "ollama_eval: [${task:0:42}] [${model:0:42}]"
-
-  if ! _exists 'jq'; then _error 'ollama_eval: jq Not Found'; return 1; fi
-
-  if ! _ollama_eval_prompt "$task" "$model"; then
-    _error 'ollama_eval: _ollama_eval_prompt failed'
+  if [[ -z "$task" ]]; then
+    _error "Task not found. Usage: oe [-m model] [-x] [-c] [-t task] \"task\""
     return 1
   fi
 
-  _debug "ollama_eval: _eval_model: [${_eval_model:0:240}]"
-  _debug "ollama_eval: _eval_prompt: [${_eval_prompt:0:240}]"
-
-  printf '\n%s generated the command:\n\n' "$_eval_model"
-
-  OLLAMA_LIB_STREAM=0
-  local json_result
-  json_result="$(ollama_generate_json -m "$_eval_model" -p "$_eval_prompt")"
-
-  if [[ -z "$json_result" ]]; then
-    _error 'ollama_eval: ollama_generate_json response empty'
-    return 1
-  fi
-
-  if ! _is_valid_json "$json_result"; then
-    _error 'ollama_eval: ollama_generate_json response invalid json'
-    return 1
-  fi
-
-  local cmd
-  cmd="$(printf '%s' "$json_result" | jq -r '.response // empty')"
-  _debug "ollama_eval: cmd: [${cmd:0:240}]"
-  if [[ -z "$cmd" ]]; then
-    _error 'ollama_eval: error extracting response'
-    return 1
-  fi
-
-  printf '%s\n\n' "$cmd"
-
-  if ! _ollama_eval_check_sanity "$cmd"; then
-    _error 'ollama_eval: cmd failed sanity check'
-    return 1
+  # Model selection: -m wins and updates OE_MODEL for this run, then $OE_MODEL, then a random model
+  local model_to_use
+  if [[ -n "$model" ]]; then
+    export OE_MODEL="$model"
+    model_to_use="$model"
+    if (( explain )); then
+      printf 'Using model: %s\n' "$model_to_use" >&2
+      printf 'To make this your default: export OE_MODEL=%s\n' "$model_to_use" >&2
+    fi
+  elif [[ -n "$OE_MODEL" ]]; then
+    model_to_use="$OE_MODEL"
+  else
+    model_to_use="$(ollama_model_random)"
+    if [[ -z "$model_to_use" ]]; then
+      _error "No models found. Please specify a model with -m or set OE_MODEL."
+      return 1
+    fi
   fi
 
-  if ! _ollama_eval_check_syntax "$cmd"; then
-    _error 'ollama_eval: cmd failed syntax check'
-    return 1
+  # Prompt generation: file creation, explained one-liner, or plain one-liner
+  local prompt
+  if (( create_file )); then
+    prompt="Create a plain text file based on this description:\n"
+    prompt+="$task\n"
+    prompt+="Return only the raw content of the file.\n"
+    prompt+="Do not include explanations or formatting outside the file content."
+  elif (( explain )); then
+    prompt="Generate a safe, POSIX-compliant Bash one-liner.\n"
+    prompt+="Task: $task\n"
+    prompt+="Add a short explanation as a Bash comment (# comment) before the command.\n"
+    prompt+="Output only the comment and command."
+  else
+    prompt="Generate a safe, POSIX-compliant Bash one-liner.\n"
+    prompt+="Task: $task\n"
+    prompt+="Return only the command."
   fi
 
-  if ! _ollama_eval_check_danger "$cmd"; then
-    _error 'ollama_eval: cmd failed danger check'
+  # Call Ollama
+  OLLAMA_LIB_STREAM=0
+  local result exit_code
+  result="$(ollama_generate -m "$model_to_use" -p "$prompt")"
+  exit_code=$?
+  if (( exit_code != 0 )); then
+    _error "Failed to get response from Ollama."
     return 1
   fi
 
-  _ollama_eval_permission_sandbox "$cmd"
-  case $? in
-    0) return 0 ;; # Command was run in sandbox
-    1) return 1 ;; # User aborted
-    2) : ;;        # User requested dangerous mode
-  esac
+  local filtered_output
+  filtered_output="$(_filter_output "$result" "$explain")"
 
-  _ollama_eval_permission_eval "$cmd"
+  printf '%s\n' "$filtered_output"
 }
 
 oe "$@"