Skip to content

Commit 623b27b

Browse files
committed
[FEAT] minIO test.txt 파일 받기
1 parent 0ee534f commit 623b27b

File tree

6 files changed

+91
-3
lines changed

6 files changed

+91
-3
lines changed

api/main.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
from fastapi import APIRouter
2-
from api.routes import user, ranking, question, theme, notification, answer, discussion, comment, badges
2+
from api.routes import user, ranking, question, theme, notification, answer, discussion, comment, badges, download_model
33

44
api_router = APIRouter()
55

@@ -11,4 +11,5 @@
1111
api_router.include_router(answer.router, prefix='/answer', tags=["answer"])
1212
api_router.include_router(discussion.router, prefix='/discussion', tags=["discussion"])
1313
api_router.include_router(comment.router, prefix='/comment', tags=["comment"])
14-
api_router.include_router(badges.router, prefix='/badges', tags=["badges"])
14+
api_router.include_router(badges.router, prefix='/badges', tags=["badges"])
15+
api_router.include_router(download_model.router, prefix='/download_model', tags=["download_model"])

api/routes/download_model.py

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
from fastapi import APIRouter
2+
# from models.model_loader import llama_model
3+
from core.minio_service import download_model_from_minio
4+
import os
5+
6+
router = APIRouter()
7+
8+
@router.get("/")
9+
async def download_model():
10+
"""
11+
MinIO에서 모델 다운로드 API
12+
"""
13+
model_s3_path = "test_folder/test.txt" # MinIO에 저장된 경로
14+
local_path = "./downloaded_model/test.txt" # local 저장 경로
15+
16+
result = download_model_from_minio(model_s3_path, local_path)
17+
18+
if result:
19+
return {"message": "✅ 모델 다운로드 성공!", "local_path": result}
20+
else:
21+
return {"error": "❌ 모델 다운로드 실패"}
22+

core/minio_service.py

Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,34 @@
1+
import boto3
2+
import os
3+
from dotenv import load_dotenv
4+
5+
# Load environment variables from .env, letting the file override the shell.
load_dotenv(override=True)

MINIO_URL = os.getenv("MINIO_URL")
AWS_ACCESS_KEY_ID = os.getenv("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.getenv("AWS_SECRET_ACCESS_KEY")
S3_BUCKET_NAME = os.getenv("S3_BUCKET_NAME")

# boto3 S3 client pointed at the MinIO endpoint.
# NOTE(review): plain http:// endpoint — assumes a trusted network; confirm
# whether TLS is required for this deployment.
s3 = boto3.client(
    "s3",
    endpoint_url=f"http://{MINIO_URL}",
    aws_access_key_id=AWS_ACCESS_KEY_ID,
    aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
)
20+
21+
def download_model_from_minio(model_path: str, local_save_path: str):
    """Download a single object from MinIO to the local filesystem.

    :param model_path: object key inside the MinIO bucket
                       (e.g. "test_folder/test.txt")
    :param local_save_path: local path to write the file to
                            (e.g. "./downloaded_model/test.txt")
    :return: ``local_save_path`` on success, ``None`` on any failure
             (errors are printed, never raised to the caller)
    """
    try:
        # Create the target directory only when the path actually has one:
        # os.makedirs("") raises FileNotFoundError for bare filenames like
        # "file.txt", which the broad except below would silently swallow.
        target_dir = os.path.dirname(local_save_path)
        if target_dir:
            os.makedirs(target_dir, exist_ok=True)
        s3.download_file(S3_BUCKET_NAME, model_path, local_save_path)
        print(f"✅ {model_path} 다운로드 완료 → {local_save_path}")
        return local_save_path
    except Exception as e:
        # Deliberate best-effort contract: callers check for None
        # instead of handling exceptions.
        print(f"❌ MinIO에서 {model_path} 다운로드 실패: {e}")
        return None

downloaded_model/test.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
MinIO 연결 테스트

models/model_loader.py

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
import os
2+
import torch
3+
from config import minio_client, bucket_name
4+
5+
# Local cache path for the fine-tuned model weights.
MODEL_PATH = "models/llama_model.safetensors"


def download_model():
    """Fetch the fine-tuned model from MinIO unless it is already cached locally."""
    if os.path.exists(MODEL_PATH):
        return  # already downloaded — nothing to do
    print("Downloading model from MinIO...")
    minio_client.fget_object(bucket_name, "llama/finetuned_model.safetensors", MODEL_PATH)
    print("Model downloaded.")
15+
# 모델 로드 함수
16+
def load_model():
17+
"""모델을 로드하고 FastAPI에서 사용 가능하게 설정"""
18+
download_model() # MinIO에서 모델 다운로드
19+
print("Loading Llama model...")
20+
model = torch.load(MODEL_PATH, map_location="cpu") # CPU 또는 GPU로 로드
21+
model.eval() # 모델을 평가 모드로 설정
22+
print("Model loaded successfully!")
23+
return model
# Eagerly load the model so importers get a ready-to-use instance.
# NOTE(review): this triggers a download and torch.load at import time —
# consider lazy initialization if startup cost becomes a problem.
llama_model = load_model()

requirements.txt

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,4 +7,8 @@ sqlalchemy
77
pymysql
88
websockets
99
redis
10-
httpx
10+
httpx
11+
torch
12+
minio
13+
boto3
14+
python-dotenv

0 commit comments

Comments
 (0)