9 | 9 | from uuid import UUID |
10 | 10 | |
11 | 11 | import numpy as np |
12 | | -import requests |
13 | | -from datasets import Dataset as HFDataset |
14 | 12 | from pydantic import BaseModel, field_validator |
15 | 13 | |
16 | | -from ragas._version import __version__ |
17 | | -from ragas.callbacks import ChainRunEncoder, parse_run_traces |
| 14 | +from ragas.callbacks import parse_run_traces |
18 | 15 | from ragas.cost import CostCallbackHandler |
19 | | -from ragas.exceptions import UploadException |
20 | 16 | from ragas.messages import AIMessage, HumanMessage, ToolCall, ToolMessage |
21 | | -from ragas.sdk import ( |
22 | | -    RAGAS_API_SOURCE, |
23 | | -    build_evaluation_app_url, |
24 | | -    check_api_response, |
25 | | -    get_api_url, |
26 | | -    get_app_token, |
27 | | -    get_app_url, |
28 | | -    upload_packet, |
29 | | -) |
30 | 17 | from ragas.utils import safe_nanmean |
31 | 18 | |
32 | 19 | if t.TYPE_CHECKING: |
@@ -537,48 +524,6 @@ def total_cost( |
537 | 524 |             cost_per_input_token, cost_per_output_token, per_model_costs |
538 | 525 |         ) |
539 | 526 | |
540 | | -    def upload( |
541 | | -        self, |
542 | | -        verbose: bool = True, |
543 | | -    ) -> str: |
544 | | -        from datetime import datetime, timezone |
545 | | - |
546 | | -        timestamp = datetime.now(timezone.utc).isoformat() |
547 | | -        root_trace = [ |
548 | | -            trace for trace in self.ragas_traces.values() if trace.parent_run_id is None |
549 | | -        ][0] |
550 | | -        packet = json.dumps( |
551 | | -            { |
552 | | -                "run_id": str(root_trace.run_id), |
553 | | -                "created_at": timestamp, |
554 | | -                "evaluation_run": [t.model_dump() for t in self.ragas_traces.values()], |
555 | | -            }, |
556 | | -            cls=ChainRunEncoder, |
557 | | -        ) |
558 | | -        response = upload_packet( |
559 | | -            path="/alignment/evaluation", |
560 | | -            data_json_string=packet, |
561 | | -        ) |
562 | | - |
563 | | -        # check status codes |
564 | | -        app_url = get_app_url() |
565 | | -        evaluation_app_url = build_evaluation_app_url(app_url, root_trace.run_id) |
566 | | -        if response.status_code == 409: |
567 | | -            # this evalution already exists |
568 | | -            if verbose: |
569 | | -                print(f"Evaluation run already exists. View at {evaluation_app_url}") |
570 | | -            return evaluation_app_url |
571 | | -        elif response.status_code != 200: |
572 | | -            # any other error |
573 | | -            raise UploadException( |
574 | | -                status_code=response.status_code, |
575 | | -                message=f"Failed to upload results: {response.text}", |
576 | | -            ) |
577 | | - |
578 | | -        if verbose: |
579 | | -            print(f"Evaluation results uploaded! View at {evaluation_app_url}") |
580 | | -        return evaluation_app_url |
581 | | - |
582 | 527 | |
583 | 528 | class PromptAnnotation(BaseModel): |
584 | 529 |     prompt_input: t.Dict[str, t.Any] |
@@ -642,63 +587,6 @@ def from_json(cls, path: str, metric_name: t.Optional[str]) -> "MetricAnnotation |
642 | 587 |         dataset = json.load(open(path)) |
643 | 588 |         return cls._process_dataset(dataset, metric_name) |
644 | 589 | |
645 | | -    @classmethod |
646 | | -    def from_app( |
647 | | -        cls, |
648 | | -        run_id: str, |
649 | | -        metric_name: t.Optional[str] = None, |
650 | | -    ) -> "MetricAnnotation": |
651 | | -        """ |
652 | | -        Fetch annotations from a URL using either evaluation result or run_id |
653 | | - |
654 | | -        Parameters |
655 | | -        ---------- |
656 | | -        run_id : str |
657 | | -            Direct run ID to fetch annotations |
658 | | -        metric_name : str, optional |
659 | | -            Name of the specific metric to filter |
660 | | - |
661 | | -        Returns |
662 | | -        ------- |
663 | | -        MetricAnnotation |
664 | | -            Annotation data from the API |
665 | | - |
666 | | -        Raises |
667 | | -        ------ |
668 | | -        ValueError |
669 | | -            If run_id is not provided |
670 | | -        """ |
671 | | -        if run_id is None: |
672 | | -            raise ValueError("run_id must be provided") |
673 | | - |
674 | | -        endpoint = f"/api/v1/alignment/evaluation/annotation/{run_id}" |
675 | | - |
676 | | -        app_token = get_app_token() |
677 | | -        base_url = get_api_url() |
678 | | -        app_url = get_app_url() |
679 | | - |
680 | | -        response = requests.get( |
681 | | -            f"{base_url}{endpoint}", |
682 | | -            headers={ |
683 | | -                "Content-Type": "application/json", |
684 | | -                "x-app-token": app_token, |
685 | | -                "x-source": RAGAS_API_SOURCE, |
686 | | -                "x-app-version": __version__, |
687 | | -            }, |
688 | | -        ) |
689 | | - |
690 | | -        check_api_response(response) |
691 | | -        dataset = response.json()["data"] |
692 | | - |
693 | | -        if not dataset: |
694 | | -            evaluation_url = build_evaluation_app_url(app_url, run_id) |
695 | | -            raise ValueError( |
696 | | -                f"No annotations found. Please annotate the Evaluation first then run this method. " |
697 | | -                f"\nNote: you can annotate the evaluations using the Ragas app by going to {evaluation_url}" |
698 | | -            ) |
699 | | - |
700 | | -        return cls._process_dataset(dataset, metric_name) |
701 | | - |
702 | 590 |     def __len__(self): |
703 | 591 |         return sum(len(value) for value in self.root.values()) |
704 | 592 | |
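For context, a minimal sketch of the call sites this diff removes. It assumes the deleted `upload()` lived on the `EvaluationResult` returned by `ragas.evaluate()` and that `MetricAnnotation` is exported from this module; `eval_dataset`, `metrics`, and the run ID below are placeholders, not values taken from this diff. After this change, both calls fail with `AttributeError`.

```python
# Sketch only: the two public entry points deleted in this diff.
# Assumptions: upload() belonged to the EvaluationResult returned by evaluate(),
# and MetricAnnotation is importable from ragas.dataset_schema; eval_dataset,
# metrics, and the run id are placeholders.
from ragas import evaluate
from ragas.dataset_schema import MetricAnnotation

result = evaluate(dataset=eval_dataset, metrics=metrics)

# Removed: pushed the run's traces to the hosted Ragas app and returned a view URL.
evaluation_url = result.upload(verbose=True)

# Removed: fetched human annotations for a run back from the app by run id.
annotations = MetricAnnotation.from_app(
    run_id="<run-id>",
    metric_name=None,  # or a specific metric name to filter
)
```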
|