@@ -139,7 +139,16 @@ def get_required_columns(
         return self.required_columns

     @abstractmethod
-    def init(self, run_config: RunConfig): ...
+    def init(self, run_config: RunConfig) -> None:
+        """
+        Initialize the metric with the given run configuration.
+
+        Parameters
+        ----------
+        run_config : RunConfig
+            Configuration for the metric run including timeouts and other settings.
+        """
+        ...

     @deprecated("0.2", removal="0.3", alternative="single_turn_ascore")
     def score(self, row: t.Dict, callbacks: Callbacks = None) -> float:
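Downstream, the evaluation loop is expected to call this hook exactly once before any scoring happens. A minimal sketch of that call site, assuming only that RunConfig exposes a timeout field and that `metric` is some concrete Metric instance (both stand-ins, not part of this diff):

from ragas.run_config import RunConfig

run_config = RunConfig(timeout=60)  # timeout field assumed; see RunConfig for the actual options
metric.init(run_config)             # hypothetical concrete Metric instance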
@@ -229,10 +238,23 @@ class MetricWithLLM(Metric, PromptMixin):
     llm: t.Optional[BaseRagasLLM] = None
     output_type: t.Optional[MetricOutputType] = None

-    def init(self, run_config: RunConfig):
+    def init(self, run_config: RunConfig) -> None:
+        """
+        Initialize the metric with the run configuration and validate that an LLM is present.
+
+        Parameters
+        ----------
+        run_config : RunConfig
+            Configuration for the metric run.
+
+        Raises
+        ------
+        ValueError
+            If no LLM is provided to the metric.
+        """
         if self.llm is None:
             raise ValueError(
-                f"Metric '{self.name}' has no valid LLM provided (self.llm is None). Please initantiate a the metric with an LLM to run."  # noqa
+                f"Metric '{self.name}' has no valid LLM provided (self.llm is None). Please instantiate the metric with an LLM to run."
             )
         self.llm.set_run_config(run_config)

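To see the new validation in action, here is a hedged sketch of wiring an LLM into an LLM-backed metric before calling init. Faithfulness and LangchainLLMWrapper are existing ragas APIs; the model name is an arbitrary placeholder:

from langchain_openai import ChatOpenAI
from ragas.llms import LangchainLLMWrapper
from ragas.metrics import Faithfulness
from ragas.run_config import RunConfig

llm = LangchainLLMWrapper(ChatOpenAI(model="gpt-4o-mini"))  # placeholder model name
metric = Faithfulness(llm=llm)
metric.init(RunConfig())   # ok: llm is set, run config is propagated to it

bare_metric = Faithfulness()
bare_metric.init(RunConfig())  # raises ValueError with the message above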
@@ -735,29 +757,106 @@ class ModeMetric(t.Protocol):

 @dataclass
 class SimpleBaseMetric(ABC):
-    """Base class for simple metrics that return MetricResult objects."""
+    """
+    Base class for simple metrics that return MetricResult objects.
+
+    This class provides the foundation for metrics that evaluate inputs
+    and return structured MetricResult objects containing scores and reasoning.
+
+    Attributes
+    ----------
+    name : str
+        The name of the metric.
+    allowed_values : AllowedValuesType
+        Allowed values for the metric output. Can be a list of strings for
+        discrete metrics, a tuple of floats for numeric metrics, or an integer
+        for ranking metrics.
+
+    Examples
+    --------
+    >>> from ragas.metrics import discrete_metric
+    >>>
+    >>> @discrete_metric(name="sentiment", allowed_values=["positive", "negative"])
+    ... def sentiment_metric(user_input: str, response: str) -> str:
+    ...     return "positive" if "good" in response else "negative"
+    >>>
+    >>> result = sentiment_metric(user_input="How are you?", response="I'm good!")
+    >>> print(result.value)  # "positive"
+    """

     name: str
     allowed_values: AllowedValuesType = field(default_factory=lambda: ["pass", "fail"])

     @abstractmethod
     def score(self, **kwargs) -> "MetricResult":
+        """
+        Synchronously calculate the metric score.
+
+        Parameters
+        ----------
+        **kwargs : dict
+            Input parameters required by the specific metric implementation.
+
+        Returns
+        -------
+        MetricResult
+            The evaluation result containing the score and reasoning.
+        """
         pass

     @abstractmethod
     async def ascore(self, **kwargs) -> "MetricResult":
+        """
+        Asynchronously calculate the metric score.
+
+        Parameters
+        ----------
+        **kwargs : dict
+            Input parameters required by the specific metric implementation.
+
+        Returns
+        -------
+        MetricResult
+            The evaluation result containing the score and reasoning.
+        """
         pass

     def batch_score(
         self,
         inputs: t.List[t.Dict[str, t.Any]],
     ) -> t.List["MetricResult"]:
+        """
+        Synchronously calculate scores for a batch of inputs.
+
+        Parameters
+        ----------
+        inputs : List[Dict[str, Any]]
+            List of input dictionaries, each containing parameters for the metric.
+
+        Returns
+        -------
+        List[MetricResult]
+            List of evaluation results, one for each input.
+        """
         return [self.score(**input_dict) for input_dict in inputs]

     async def abatch_score(
         self,
         inputs: t.List[t.Dict[str, t.Any]],
     ) -> t.List["MetricResult"]:
+        """
+        Asynchronously calculate scores for a batch of inputs in parallel.
+
+        Parameters
+        ----------
+        inputs : List[Dict[str, Any]]
+            List of input dictionaries, each containing parameters for the metric.
+
+        Returns
+        -------
+        List[MetricResult]
+            List of evaluation results, one for each input.
+        """
         async_tasks = []
         for input_dict in inputs:
             # Process input asynchronously
@@ -767,29 +866,30 @@ async def abatch_score(
         return await asyncio.gather(*async_tasks)

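Of the two batch helpers documented above, batch_score loops over the inputs sequentially while abatch_score fans them out through asyncio.gather. A small usage sketch, assuming the decorator-built sentiment_metric from the SimpleBaseMetric docstring inherits these batch helpers:

import asyncio

inputs = [
    {"user_input": "How are you?", "response": "I'm good!"},
    {"user_input": "Any news?", "response": "Things went badly."},
]
results = asyncio.run(sentiment_metric.abatch_score(inputs))
print([r.value for r in results])  # expected: ["positive", "negative"]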
-def create_auto_response_model(name: str, **fields):
-    """Create a response model and mark it as auto-generated by Ragas.
+def create_auto_response_model(name: str, **fields) -> t.Type["BaseModel"]:
+    """
+    Create a response model and mark it as auto-generated by Ragas.

     This function creates a Pydantic model using create_model and marks it
     with a special attribute to indicate it was auto-generated. This allows
     the save() method to distinguish between auto-generated models (which
     are recreated on load) and custom user models.

-    Parameters:
-    -----------
+    Parameters
+    ----------
     name : str
         Name for the model class
     **fields
-        Field definitions in create_model format
+        Field definitions in create_model format.
         Each field is specified as: field_name=(type, default_or_field_info)

-    Returns:
-    --------
+    Returns
+    -------
     Type[BaseModel]
         Pydantic model class marked as auto-generated

-    Examples:
-    ---------
+    Examples
+    --------
     >>> from pydantic import Field
     >>> # Simple model with required fields
     >>> ResponseModel = create_auto_response_model(