Skip to content

Commit f2bcf78

Browse files
author
EL BADOURI Youssef
committed
added docker compose
1 parent 5c21b0b commit f2bcf78

File tree

11 files changed

+279
-130
lines changed

11 files changed

+279
-130
lines changed

README.md

Lines changed: 5 additions & 57 deletions
Original file line numberDiff line numberDiff line change
@@ -79,20 +79,9 @@ Before you begin, ensure you have met the following requirements:
7979
git clone https://github.com/MonsefRH/SafeOps
8080
```
8181

82-
2. **Install dependencies**
83-
```bash
84-
# Install frontend dependencies
85-
cd frontend
86-
87-
npm install
88-
89-
# Install Python backend dependencies
90-
cd backend
9182

92-
pip install -r requirements.txt
93-
```
9483

95-
3. **Environment setup**
84+
2. **Environment setup in the backend**
9685
```bash
9786
# Copy environment template
9887
cp .env.example .env
@@ -101,59 +90,18 @@ Before you begin, ensure you have met the following requirements:
10190
nano .env
10291
```
10392

104-
4. **Database Setup**
105-
106-
***Step 1: Connect to PostgreSQL***
107-
```bash
108-
psql -U postgres
109-
```
110-
111-
***Step 2: Create Database***
112-
```sql
113-
-- In the psql terminal
114-
CREATE DATABASE safeops;
115-
\q
116-
```
117-
118-
***Step 3: Create the admin user***
119-
```bash
120-
psql -U postgres -d safeops -f base.sql
121-
122-
```
123-
***Step 4: Import Database Schema***
124-
```bash
125-
psql -U postgres
126-
127-
-- In the psql terminal
128-
129-
INSERT INTO users (name, email, password, role)
130-
VALUES (
131-
'Admin ',
132-
133-
'$2b$12$YOUR_HASHED_PASSWORD', -- Replace with a bcrypt hashed password
134-
'admin'
135-
);
136-
\q
137-
```
138-
---
139-
140-
**Note:** Replace `postgres` with your actual PostgreSQL username.
141-
14293
## Usage
14394

144-
### Development Mode
95+
96+
Start the full application (backend, frontend, and database) with:
14597

14698
1. **Start the application**
14799
```bash
148-
python app.py
149-
```
100+
docker compose up -d --build
150101

151-
2. **Start the frontend development server**
152-
```bash
153-
npm start
154102
```
155103

156-
3. **Access the application**
104+
2. **Access the application**
157105
- Frontend: `http://localhost:3000`
158106
- API: `http://localhost:5000`
159107

backend/Dockerfile

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,16 @@
1+
FROM python:3.12-slim

WORKDIR /app

# git is needed at runtime by GitPython; build-essential and libpq-dev are
# needed to compile native wheels (e.g. psycopg2).
# --no-install-recommends keeps the slim image small.
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential libpq-dev git \
    && rm -rf /var/lib/apt/lists/*

# Install dependencies in their own layer so rebuilds after code-only
# changes reuse the cached pip install.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY . .

EXPOSE 5000
CMD ["flask", "run", "--host=0.0.0.0", "--port=5000"]

backend/requirements.txt

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,9 @@ transformers
1919
torch
2020
flask_sqlalchemy
2121
semgrep
22-
check
22+
checkov
2323
flasgger
24-
apispec-pydantic-plugin
24+
apispec-pydantic-plugin
25+
apispec

backend/services/report_service.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -157,6 +157,7 @@ def send_csv_report_email(
157157
csv_path: str,
158158
csv_filename: str,
159159
user_name: str | None = None,
160+
download_url: str | None = None,
160161
):
161162
"""Send an HTML email with SafeOps blue style and attach the CSV report."""
162163
msg = EmailMessage()

backend/services/risks_service.py

Lines changed: 104 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -1,23 +1,74 @@
11
from utils.db import db
22
from models.scan_history import ScanHistory
33
import logging
4+
import json
45

5-
# Configure logging
6+
# Configure logging (keep yours)
67
logging.basicConfig(
78
level=logging.DEBUG,
89
format='%(asctime)s %(levelname)s:%(name)s: %(message)s',
910
handlers=[logging.FileHandler('risks_app.log'), logging.StreamHandler()]
1011
)
1112
logger = logging.getLogger(__name__)
1213

14+
# Map various tool severities to your 3 buckets
15+
def _normalize_severity(raw: str) -> str:
16+
if not raw:
17+
return "INFO"
18+
s = str(raw).strip().upper()
19+
# Checkov: CRITICAL/HIGH/MEDIUM/LOW/INFO
20+
# Semgrep: ERROR/WARNING/INFO
21+
if s in {"CRITICAL", "HIGH", "ERROR"}:
22+
return "ERROR"
23+
if s in {"MEDIUM", "WARNING"}:
24+
return "WARNING"
25+
return "INFO"
26+
27+
def _ensure_dict(obj):
28+
# Accept dict or JSON string
29+
if obj is None:
30+
return {}
31+
if isinstance(obj, dict):
32+
return obj
33+
if isinstance(obj, str):
34+
try:
35+
return json.loads(obj)
36+
except json.JSONDecodeError:
37+
logger.warning("scan_result is a non-JSON string; ignoring.")
38+
return {}
39+
# Unexpected type
40+
logger.warning(f"scan_result has unexpected type: {type(obj)}")
41+
return {}
42+
43+
def _extract_checkov_failed_checks(sr: dict):
44+
"""
45+
Checkov commonly returns either:
46+
- {"failed_checks": [...]}
47+
- {"results": {"failed_checks": [...], "parsing_errors": [...], ...}}
48+
"""
49+
if "failed_checks" in sr and isinstance(sr["failed_checks"], list):
50+
return sr["failed_checks"]
51+
results = sr.get("results", {})
52+
if isinstance(results, dict) and isinstance(results.get("failed_checks"), list):
53+
return results["failed_checks"]
54+
return []
55+
56+
def _extract_semgrep_findings(sr: dict):
57+
"""
58+
Semgrep JSON v1: {"results": [ { "check_id": "...", "path": "...", "start": {...}, "extra": {"severity": "ERROR", "message": "..."} }, ... ] }
59+
"""
60+
results = sr.get("results")
61+
if isinstance(results, list):
62+
return results
63+
return []
64+
1365
def get_risks(user_id):
14-
"""Fetch and aggregate risks for a user."""
66+
"""Fetch and aggregate risks for a user (Checkov + Semgrep)."""
1567
if not user_id:
1668
logger.error("Missing user_id for risks")
1769
raise ValueError("user_id is required")
1870

1971
try:
20-
# Fetch scan history
2172
scans = (
2273
ScanHistory.query
2374
.filter_by(user_id=user_id)
@@ -26,40 +77,65 @@ def get_risks(user_id):
2677
.all()
2778
)
2879

29-
# Aggregate risks by severity
3080
severity_counts = {"ERROR": 0, "WARNING": 0, "INFO": 0}
3181
detailed_risks = []
3282

3383
for scan in scans:
34-
scan_result = scan.scan_result
35-
scan_type = scan.scan_type
36-
if "failed_checks" in scan_result:
37-
failed_checks = scan_result["failed_checks"]
38-
elif "results" in scan_result and "failed_checks" in scan_result["results"]:
39-
failed_checks = scan_result["results"]["failed_checks"]
84+
scan_result = _ensure_dict(getattr(scan, "scan_result", None))
85+
scan_type = getattr(scan, "scan_type", "unknown")
86+
87+
# 1) Try Checkov shape(s)
88+
failed_checks = _extract_checkov_failed_checks(scan_result)
89+
90+
# 2) If no Checkov failed checks, try Semgrep shape
91+
semgrep_results = []
92+
if not failed_checks:
93+
semgrep_results = _extract_semgrep_findings(scan_result)
94+
95+
# ---- Aggregate Checkov findings ----
4096
for check in failed_checks:
41-
severity = check.get("severity") or "INFO" # Handles None explicitly
42-
# Normalize to uppercase for counting (in case Checkov uses "high" or "High")
43-
severity_upper = severity.upper() if severity else "INFO"
44-
# Only count if it's one of our expected keys (avoids errors if weird values)
45-
if severity_upper in severity_counts:
46-
severity_counts[severity_upper] += 1
47-
else:
48-
severity_counts["INFO"] += 1 # Fallback for unknown severities
97+
sev = _normalize_severity(check.get("severity"))
98+
severity_counts[sev] += 1
99+
100+
# Common Checkov fields
49101
detailed_risks.append({
50-
"severity": severity,
102+
"severity": sev,
51103
"check_id": check.get("check_id"),
52-
"file_path": check.get("file_path"),
53-
"message": check.get("message"),
54-
"suggestion": check.get("suggestion"),
55-
"scan_type": scan_type
104+
"file_path": check.get("file_path") or check.get("file") or check.get("resource"),
105+
"message": check.get("check_name") or check.get("message"),
106+
"suggestion": check.get("guideline") or check.get("remediation") or check.get("description"),
107+
"scan_type": scan_type or "checkov"
108+
})
109+
110+
# ---- Aggregate Semgrep findings ----
111+
for item in semgrep_results:
112+
# Semgrep severity in extra.severity
113+
raw_sev = None
114+
extra = item.get("extra") if isinstance(item.get("extra"), dict) else {}
115+
if extra:
116+
raw_sev = extra.get("severity")
117+
sev = _normalize_severity(raw_sev)
118+
severity_counts[sev] += 1
119+
120+
# Semgrep fields
121+
file_path = item.get("path") or item.get("file")
122+
message = (extra.get("message") if isinstance(extra, dict) else None) or item.get("message")
123+
check_id = item.get("check_id") or (extra.get("engine_name") if isinstance(extra, dict) else None)
124+
125+
detailed_risks.append({
126+
"severity": sev,
127+
"check_id": check_id,
128+
"file_path": file_path,
129+
"message": message,
130+
"suggestion": extra.get("metadata", {}).get("fix") if isinstance(extra.get("metadata"), dict) else None,
131+
"scan_type": scan_type or "semgrep"
56132
})
57133

58-
# Format risks for dashboard
134+
# Format risks for dashboard (keep your scaling)
59135
risks = [
60-
{"name": "Critical (ERROR)", "level": severity_counts.get("ERROR", 0) * 10}, # Scale for display
61-
{"name": "High (WARNING)", "level": severity_counts.get("WARNING", 0) * 5},
62-
{"name": "Low (INFO)", "level": severity_counts.get("INFO", 0) * 2}
136+
{"name": "Critical (ERROR)", "level": severity_counts.get("ERROR", 0) * 10},
137+
{"name": "High (WARNING)", "level": severity_counts.get("WARNING", 0) * 5},
138+
{"name": "Low (INFO)", "level": severity_counts.get("INFO", 0) * 2},
63139
]
64140

65141
logger.info(f"Fetched risks for user_id {user_id}: {len(detailed_risks)} detailed risks")
@@ -71,4 +147,4 @@ def get_risks(user_id):
71147
except Exception as e:
72148
logger.error(f"Failed to fetch risks for user_id {user_id}: {str(e)}")
73149
db.session.rollback()
74-
raise RuntimeError("Failed to fetch risks")
150+
raise RuntimeError("Failed to fetch risks")

0 commit comments

Comments
 (0)