Skip to content

Commit 5603712

Browse files
committed
feat: add router endpoints with tests
1 parent 3e494a1 commit 5603712

File tree

9 files changed

+248
-15
lines changed

9 files changed

+248
-15
lines changed

.github/workflows/test.yml

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -36,12 +36,11 @@ jobs:
3636
run: poetry install
3737

3838
- name: Run tests
39-
run: poetry run pytest
39+
run: make test
4040

4141
- name: Check code quality
4242
run: |
43-
poetry run black --check babeltron/
44-
poetry run isort --check-only babeltron/
43+
make lint
4544
4645
- name: Upload coverage reports to Codecov
4746
uses: codecov/codecov-action@v3

.gitignore

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -169,3 +169,6 @@ cython_debug/
169169

170170
# PyPI configuration file
171171
.pypirc
172+
173+
# Models
174+
models/*

Makefile

Lines changed: 76 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,7 @@
1-
.PHONY: check-poetry install test lint format help system-deps coverage coverage-html
1+
.PHONY: check-poetry install test lint format help system-deps coverage coverage-html download-model download-model-small download-model-medium download-model-large serve serve-prod docker-build docker-run docker-up docker-down
2+
3+
# Define model path variable with default value, can be overridden by environment
4+
MODEL_PATH ?= ./models
25

36
# Extract target descriptions from comments
47
help: ## Show this help message
@@ -40,11 +43,10 @@ install: check-poetry system-deps ## Install project dependencies
4043

4144
test: check-poetry ## Run tests
4245
@echo "Running tests..."
43-
@poetry run pytest
46+
@poetry run pytest tests/unit
4447

4548
lint: check-poetry ## Run linters
4649
@echo "Running linters..."
47-
@poetry run flake8 babeltron
4850
@poetry run isort --check babeltron
4951
@poetry run black --check babeltron
5052

@@ -55,8 +57,77 @@ format: check-poetry ## Format code
5557

5658
coverage: check-poetry ## Run tests with coverage report
5759
@echo "Running tests with coverage..."
58-
@poetry run pytest --cov=babeltron --cov-report=term-missing
60+
@poetry run pytest tests/unit --cov=babeltron --cov-report=term
5961

6062
coverage-html: check-poetry ## Generate HTML coverage report
6163
@echo "Generating HTML coverage report..."
62-
@poetry run pytest --cov=babeltron --cov-report=html
64+
@poetry run pytest tests/unit --cov=babeltron --cov-report=html
65+
@echo "HTML coverage report generated in htmlcov/ directory"
66+
@echo "Open htmlcov/index.html in your browser to view the report"
67+
68+
# Model download commands
69+
download-model: download-model-small ## Download the default (small) translation model
70+
71+
download-model-small: check-poetry ## Download small translation model (418M parameters, ~1GB)
72+
@echo "Downloading small translation model (418M parameters)..."
73+
@poetry run python -m babeltron.scripts.download_models --size 418M --output-dir $(MODEL_PATH)
74+
75+
download-model-medium: check-poetry ## Download medium translation model (1.2B parameters, ~2.5GB)
76+
@echo "Downloading medium translation model (1.2B parameters)..."
77+
@poetry run python -m babeltron.scripts.download_models --size 1.2B --output-dir $(MODEL_PATH)
78+
79+
download-model-large: check-poetry ## Download large translation model (12B parameters, ~24GB)
80+
@echo "Downloading large translation model (12B parameters)..."
81+
@poetry run python -m babeltron.scripts.download_models --size 12B --output-dir $(MODEL_PATH)
82+
83+
# Server commands
84+
serve: check-poetry ## Run the API server locally
85+
@echo "Starting API server on http://localhost:8000..."
86+
@poetry run uvicorn babeltron.app.main:app --reload --host 0.0.0.0 --port 8000
87+
88+
serve-prod: check-poetry ## Run the API server in production mode (no reload)
89+
@echo "Starting API server in production mode on http://localhost:8000..."
90+
@poetry run uvicorn babeltron.app.main:app --host 0.0.0.0 --port 8000
91+
92+
# Docker commands
93+
docker-build: ## Build Docker image
94+
@echo "Building Docker image..."
95+
@docker build -t babeltron:latest .
96+
97+
docker-run: ## Run Docker container with model volume mount
98+
@echo "Checking for model files..."
99+
@if [ ! -d "$(MODEL_PATH)" ] || [ -z "$(shell ls -A $(MODEL_PATH) 2>/dev/null)" ]; then \
100+
echo "No model files found in $(MODEL_PATH) directory."; \
101+
read -p "Do you want to download the small model now? (y/n) " answer; \
102+
if [ "$$answer" = "y" ]; then \
103+
mkdir -p $(MODEL_PATH); \
104+
echo "Downloading small model..."; \
105+
poetry run python -m babeltron.scripts.download_models --size 418M --output-dir $(MODEL_PATH); \
106+
else \
107+
echo "Model download skipped. Container may not work properly."; \
108+
fi; \
109+
fi
110+
@echo "Running Docker container..."
111+
@docker run -p 8000:8000 -v $(shell pwd)/$(MODEL_PATH):/models babeltron:latest
112+
113+
docker-up: ## Build and start services with docker-compose
114+
@echo "Checking for model files..."
115+
@if [ ! -d "$(MODEL_PATH)" ] || [ -z "$(shell ls -A $(MODEL_PATH) 2>/dev/null)" ]; then \
116+
echo "No model files found in $(MODEL_PATH) directory."; \
117+
read -p "Do you want to download the small model now? (y/n) " answer; \
118+
if [ "$$answer" = "y" ]; then \
119+
mkdir -p $(MODEL_PATH); \
120+
echo "Downloading small model..."; \
121+
poetry run python -m babeltron.scripts.download_models --size 418M --output-dir $(MODEL_PATH); \
122+
else \
123+
echo "Model download skipped. Container may not work properly."; \
124+
fi; \
125+
fi
126+
@echo "Building and starting services with docker-compose..."
127+
@docker-compose up -d --build
128+
@echo "Services started successfully. API available at http://localhost:8000"
129+
@echo "API documentation available at http://localhost:8000/docs"
130+
131+
docker-down: ## Stop docker-compose services
132+
@echo "Stopping docker-compose services..."
133+
@docker-compose down

README.md

Lines changed: 101 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
[![Tests](https://github.com/hspedro/babeltron/actions/workflows/test.yml/badge.svg)](https://github.com/hspedro/babeltron/actions/workflows/test.yml)
44
[![PyPI version](https://badge.fury.io/py/babeltron.svg)](https://badge.fury.io/py/babeltron)
55
[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
6+
[![Coverage](https://img.shields.io/badge/coverage-90%25-brightgreen.svg)](https://github.com/hspedro/babeltron/actions/workflows/test.yml)
67

78
A Python-based REST API that leverages single multilingual models like mBERT to
89
provide efficient text translation services. Babeltron exposes a simple interface
@@ -68,6 +69,106 @@ The HTML report will be generated in the `htmlcov` directory.
6869

6970
The project uses a `.coveragerc` file to configure coverage settings. This ensures consistent coverage reporting across different environments.
7071

72+
## Downloading Translation Models
73+
74+
Babeltron requires a translation model to function. You can download models of different sizes depending on your needs and hardware constraints:
75+
76+
```bash
77+
# Download the small model (418M parameters, ~1GB disk space)
78+
make download-model
79+
80+
# Or download medium model (1.2B parameters, ~2.5GB disk space)
81+
make download-model-medium
82+
83+
# Or download large model (12B parameters, ~24GB disk space)
84+
make download-model-large
85+
```
86+
87+
### Model Size Considerations
88+
89+
- **Small (418M)**: ~1GB disk space, less memory required, faster but less accurate
90+
- **Medium (1.2B)**: ~2.5GB disk space, moderate memory requirements
91+
- **Large (12B)**: ~24GB disk space, requires significant RAM/GPU memory
92+
93+
Choose based on your hardware constraints and translation quality requirements.
94+
95+
## Running the API Server
96+
97+
After installing dependencies and downloading a model, you can run the API server locally:
98+
99+
```bash
100+
# Run the server in development mode with auto-reload
101+
make serve
102+
103+
# Or run in production mode (no auto-reload)
104+
make serve-prod
105+
```
106+
107+
The API will be available at http://localhost:8000.
108+
109+
### API Usage Examples
110+
111+
Once the server is running, you can use the translation API:
112+
113+
```bash
114+
# Translate text from English to Spanish
115+
curl -X POST "http://localhost:8000/translate" \
116+
-H "Content-Type: application/json" \
117+
-d '{
118+
"text": "Hello, how are you?",
119+
"src_lang": "en",
120+
"tgt_lang": "es"
121+
}'
122+
123+
# Response:
124+
# {"translation":"Hola, ¿cómo estás?"}
125+
```
126+
127+
You can also access the interactive API documentation at http://localhost:8000/docs.
128+
129+
## API Documentation
130+
131+
Babeltron provides interactive API documentation:
132+
133+
- **Swagger UI**: Available at http://localhost:8000/docs when the server is running
134+
- **ReDoc**: Available at http://localhost:8000/redoc for an alternative documentation view
135+
136+
These interactive documentation pages allow you to:
137+
- Explore all available endpoints
138+
- See request and response schemas
139+
- Test the API directly from your browser
140+
- View detailed descriptions of each endpoint and parameter
141+
142+
## Running with Docker
143+
144+
Babeltron can be run as a Docker container, which simplifies deployment and isolates dependencies.
145+
146+
### Building and Running with Docker
147+
148+
```bash
149+
# Start services with Docker Compose
150+
make docker-up
151+
```
152+
153+
The API will be available at http://localhost:8000.
154+
155+
### Stopping Docker Services
156+
157+
```bash
158+
# Stop services
159+
make docker-down
160+
```
161+
162+
### Docker Volume Mounts
163+
164+
The Docker setup mounts the local `./models` directory to `/models` inside the container. This allows you to:
165+
166+
1. Reuse downloaded models between container restarts
167+
2. Use different model sizes without rebuilding the image
168+
3. Persist models even if the container is removed
169+
170+
If no models are found when starting the container, you'll be prompted to download the small model automatically.
171+
71172
## License
72173

73174
MIT License

babeltron/main.py

Whitespace-only changes.

poetry.lock

Lines changed: 55 additions & 7 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

pyproject.toml

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@ authors = ["Pedro Soares <pedrofigueiredoc@gmail.com>"]
66
license = "MIT"
77
readme = "README.md"
88
repository = "https://github.com/hspedro/babeltron"
9+
packages = [{include = "babeltron"}]
910

1011
[tool.poetry.dependencies]
1112
python = "^3.9"
@@ -26,6 +27,7 @@ black = "^23.3.0"
2627
isort = "^5.12.0"
2728
flake8 = "^6.0.0"
2829
pytest-cov = "^4.1.0"
30+
httpx = "^0.27.0"
2931

3032
[build-system]
3133
requires = ["poetry-core"]
@@ -38,3 +40,12 @@ target-version = ["py38"]
3840
[tool.isort]
3941
profile = "black"
4042
line_length = 88
43+
44+
[tool.pytest.ini_options]
45+
testpaths = ["tests/unit"]
46+
python_files = "test_*.py"
47+
python_functions = "test_*"
48+
49+
[tool.coverage.run]
50+
source = ["babeltron"]
51+
omit = ["tests/*"]

test/__init__.py

Whitespace-only changes.

test/test_main.py

Whitespace-only changes.

0 commit comments

Comments
 (0)