Skip to content

Commit 4a2e863

Browse files
committed
Enhance AI review process by integrating gptme with Ollama backend and updating model configurations
Signed-off-by: Tiger Kaovilai <[email protected]>
1 parent 2c8f6b4 commit 4a2e863

File tree

1 file changed

+67
-60
lines changed

1 file changed

+67
-60
lines changed

Makefile

Lines changed: 67 additions & 60 deletions
Original file line number | Diff line number | Diff line change
@@ -665,7 +665,8 @@ Review this git diff for a project called OADP (OpenShift API for Data Protectio
665665
3. Go idioms and conventions \
666666
4. Kubernetes/OpenShift operator patterns \
667667
5. Security concerns \
668-
Please provide actionable feedback. Be concise but thorough.
668+
Please provide actionable feedback. Be concise but thorough. \
669+
If able, browse linked URLs for context.
669670
endef
670671

671672
# AI code review using Ollama on Podman
@@ -681,8 +682,8 @@ endef
681682
# - Stop and remove the container (but preserve the .ollama cache)
682683
#
683684
# Usage:
684-
# make ai-review-ollama # Uses default model (llama3.2:1b)
685-
# make ai-review-ollama OLLAMA_MODEL=phi3:mini # Uses specified model
685+
# make ai-review-gptme-ollama                        # Uses default model (gemma3:12b)
686+
# make ai-review-gptme-ollama OLLAMA_MODEL=phi3:mini # Uses specified model
686687
#
687688
# Available models (examples):
688689
# Small models (< 2GB memory):
@@ -701,25 +702,38 @@ endef
701702
# - mistral
702703

703704
# Default Ollama model (using a smaller model that requires less memory)
704-
OLLAMA_MODEL ?= gemma3n:e4b
705-
OLLAMA_MEMORY ?= 8 # will require at least this much free mem in your machine or podman machine (non-linux)
706-
707-
.PHONY: ai-review-ollama
708-
ai-review-ollama: ## Review staged git changes using Ollama AI. Requires changes to be staged with 'git add'
709-
@# This target reviews only staged changes. To stage changes, use:
710-
@# git add <files>
711-
@# To verify staged changes, run:
712-
@# git status
713-
@# Example output showing staged changes:
714-
@# Changes to be committed:
715-
@# (use "git restore --staged <file>..." to unstage)
716-
@# modified: Makefile
705+
OLLAMA_MODEL ?= gemma3:12b
706+
OLLAMA_MEMORY ?= 9 # requires at least this much free memory on your machine or podman machine (non-Linux)
707+
708+
# This target reviews staged changes using gptme with Ollama backend
709+
# Prerequisites:
710+
# - gptme installed (pip install gptme)
711+
# - Podman installed and running
712+
#
713+
# This target will:
714+
# - Create a local .ollama directory for caching models between runs
715+
# - Start an Ollama container if not already running
716+
# - Pull the model if not already cached
717+
# - Run the code review with gptme
718+
# - Stop and remove the container (but preserve the .ollama cache)
719+
#
720+
# This version enables tools for enhanced review:
721+
# - read: Read local files for context (always enabled)
722+
# - browser: Browse documentation and references (only if lynx is installed)
723+
#
724+
# Usage:
725+
# make ai-review-gptme-ollama # Uses default Ollama model
726+
# make ai-review-gptme-ollama GPTME_OLLAMA_MODEL=phi3 # Uses specific model
727+
.PHONY: ai-review-gptme-ollama
728+
ai-review-gptme-ollama: TOOLS = $(shell command -v lynx >/dev/null 2>&1 && echo "read,browser" || echo "read")
729+
ai-review-gptme-ollama: gptme ## Review staged git changes using gptme with local Ollama models (auto-manages Ollama container)
717730
@if [ -z "$$(git diff --cached --name-only)" ]; then \
718731
echo "No staged changes to review."; \
719732
echo "Please stage your changes first with 'git add <files>'"; \
720733
echo "Run 'git status' to see which files are staged."; \
721734
exit 0; \
722735
fi
736+
@# gptme is installed as a dependency, no need to check
723737
@# Check if Ollama is already available (either as existing container or local service)
724738
@if curl -s http://localhost:11434/api/tags >/dev/null 2>&1; then \
725739
echo "Ollama is already running on port 11434"; \
@@ -753,58 +767,51 @@ ai-review-ollama: ## Review staged git changes using Ollama AI. Requires changes
753767
fi \
754768
fi
755769
@# Pull model if not already cached
756-
@echo "Ensuring $(OLLAMA_MODEL) model is available..."
770+
@echo "Ensuring $(GPTME_OLLAMA_MODEL) model is available..."
757771
@if podman ps | grep -q ollama; then \
758-
podman exec ollama ollama pull $(OLLAMA_MODEL) || exit 1; \
772+
podman exec ollama ollama pull $(GPTME_OLLAMA_MODEL) || exit 1; \
759773
else \
760-
curl -s -X POST http://localhost:11434/api/pull -d '{"name":"$(OLLAMA_MODEL)"}' | while read line; do \
774+
curl -s -X POST http://localhost:11434/api/pull -d '{"name":"$(GPTME_OLLAMA_MODEL)"}' | while read line; do \
761775
echo $$line | jq -r .status 2>/dev/null || echo $$line; \
762776
done; \
763777
fi
764-
@echo "Reviewing staged changes with Ollama using $(OLLAMA_MODEL)..."
765-
@# Generate the prompt with git diff
766-
@echo "Preparing request..."; \
767-
FULL_PROMPT="$(AI_REVIEW_PROMPT)\n\nHere is the git diff:\n"; \
768-
DIFF=$$(git diff --cached | jq -Rs .); \
769-
JSON=$$(jq -n \
770-
--arg model "$(OLLAMA_MODEL)" \
771-
--arg prompt "$$FULL_PROMPT" \
772-
--argjson diff "$$DIFF" \
773-
'{model: $$model, prompt: ($$prompt + $$diff), stream: false}'); \
774-
if [ -n "$$DEBUG" ]; then \
775-
echo "Debug: Request JSON:"; \
776-
echo "$$JSON" | jq .; \
777-
fi; \
778-
echo "Sending request to Ollama API..."; \
779-
TEMP_RESPONSE=$$(mktemp .ollama-response.XXXXXX); \
780-
curl -s -X POST http://localhost:11434/api/generate \
781-
-H "Content-Type: application/json" \
782-
--max-time 300 \
783-
-d "$$JSON" \
784-
-o "$$TEMP_RESPONSE" 2>&1; \
785-
if [ -n "$$DEBUG" ]; then \
786-
echo "Debug: Response saved to $$TEMP_RESPONSE"; \
787-
cp "$$TEMP_RESPONSE" .ollama-debug-response.txt; \
788-
fi; \
789-
if jq -e . "$$TEMP_RESPONSE" >/dev/null 2>&1; then \
790-
jq -r '.response // .error // "No response field"' "$$TEMP_RESPONSE"; \
791-
rm -f "$$TEMP_RESPONSE"; \
778+
@echo "Reviewing staged changes with gptme using Ollama model: $(GPTME_OLLAMA_MODEL)..."
779+
@if [ "$(TOOLS)" = "read,browser" ]; then \
780+
echo "gptme will be able to read files and browse documentation for context."; \
792781
else \
793-
echo "Error: Invalid JSON response from Ollama. Checking for common issues..."; \
794-
if grep -q "404 page not found" "$$TEMP_RESPONSE" 2>/dev/null; then \
795-
echo "Error: Ollama API endpoint not found. The container may not be ready."; \
796-
elif grep -q "Connection refused" "$$TEMP_RESPONSE" 2>/dev/null; then \
797-
echo "Error: Cannot connect to Ollama. The container may not be running."; \
798-
else \
799-
echo "Raw response (first 500 chars):"; \
800-
head -c 500 "$$TEMP_RESPONSE" 2>/dev/null || echo "(empty response)"; \
801-
echo "..."; \
802-
echo "(Run with DEBUG=1 to save full response)"; \
803-
fi; \
804-
rm -f "$$TEMP_RESPONSE"; \
782+
echo "gptme will be able to read files for context (install lynx to enable browser tool)."; \
805783
fi
806-
@# Only stop and remove container if we started it
784+
@# Generate the review using gptme with Ollama backend
785+
@git diff --cached | OPENAI_BASE_URL="http://localhost:11434/v1" $(GPTME) "$(AI_REVIEW_PROMPT)" \
786+
--model "local/$(GPTME_OLLAMA_MODEL)" \
787+
--tools "$(TOOLS)" \
788+
--non-interactive
789+
# Stop and remove the Ollama container if one is running (the .ollama model cache is preserved); NOTE(review): the `podman ps | grep -q ollama` guard matches any running Ollama container, not only one this target started
807790
@if podman ps | grep -q ollama; then \
808791
echo "Stopping and removing Ollama container..."; \
809792
podman stop ollama && podman rm ollama; \
810793
fi
794+
795+
# Default Ollama model for gptme (should match one of the models available in your Ollama installation)
796+
GPTME_OLLAMA_MODEL ?= $(OLLAMA_MODEL)
797+
798+
# gptme installation
799+
GPTME = $(LOCALBIN)/gptme
800+
GPTME_VERSION ?= latest
801+
.PHONY: gptme
802+
gptme: $(GPTME) ## Install gptme locally if necessary.
803+
$(GPTME): $(LOCALBIN)
804+
@if [ -f $(GPTME) ] && $(GPTME) --version >/dev/null 2>&1; then \
805+
echo "gptme is already installed at $(GPTME)"; \
806+
else \
807+
echo "Installing gptme..."; \
808+
python3 -m venv $(LOCALBIN)/gptme-venv || (echo "Error: python3 venv module not found. Please install python3-venv package." && exit 1); \
809+
$(LOCALBIN)/gptme-venv/bin/pip install --upgrade pip; \
810+
if [ "$(GPTME_VERSION)" = "latest" ]; then \
811+
$(LOCALBIN)/gptme-venv/bin/pip install gptme; \
812+
else \
813+
$(LOCALBIN)/gptme-venv/bin/pip install gptme==$(GPTME_VERSION); \
814+
fi; \
815+
ln -sf $(LOCALBIN)/gptme-venv/bin/gptme $(GPTME); \
816+
echo "gptme installed successfully at $(GPTME)"; \
817+
fi

0 commit comments

Comments
 (0)