Skip to content

Commit ba3b109

Browse files
authored
Added integration with Amazon API Gateway + bug fix in _answer_relevance.py (#327)
Closed the other PR. 1. Merged with latest changes. 2. Added integration with Amazon API Gateway. 3. Fixed a bug in _answer_relevance.py caused by empty or NaN text in the cosine_sim calculation.
1 parent fe343d6 commit ba3b109

File tree

2 files changed

+11
-3
lines changed

2 files changed

+11
-3
lines changed

src/ragas/llms/langchain.py

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44

55
from langchain.chat_models import AzureChatOpenAI, BedrockChat, ChatOpenAI, ChatVertexAI
66
from langchain.chat_models.base import BaseChatModel
7-
from langchain.llms import AzureOpenAI, Bedrock, OpenAI, VertexAI
7+
from langchain.llms import AmazonAPIGateway, AzureOpenAI, Bedrock, OpenAI, VertexAI
88
from langchain.llms.base import BaseLLM
99
from langchain.schema import LLMResult
1010

@@ -25,6 +25,9 @@ def isOpenAI(llm: BaseLLM | BaseChatModel) -> bool:
2525
def isBedrock(llm: BaseLLM | BaseChatModel) -> bool:
2626
return isinstance(llm, Bedrock) or isinstance(llm, BedrockChat)
2727

28+
def isAmazonAPIGateway(llm: BaseLLM | BaseChatModel) -> bool:
29+
return isinstance(llm, AmazonAPIGateway)
30+
2831

2932
# have to specify it twice for runtime and static checks
3033
MULTIPLE_COMPLETION_SUPPORTED = [
@@ -195,6 +198,8 @@ def generate(
195198
temperature = 0.2 if n > 1 else 1e-8
196199
if isBedrock(self.llm) and ("model_kwargs" in self.llm.__dict__):
197200
self.llm.model_kwargs = {"temperature": temperature}
201+
elif isAmazonAPIGateway(self.llm) and ("model_kwargs" in self.llm.__dict__):
202+
self.llm.model_kwargs = {"temperature": temperature}
198203
else:
199204
self.llm.temperature = temperature
200205

src/ragas/metrics/_answer_relevance.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -89,8 +89,11 @@ def _score_batch(
8989

9090
scores = []
9191
for question, gen_questions in zip(questions, results):
92-
cosine_sim = self.calculate_similarity(question, gen_questions)
93-
scores.append(cosine_sim.mean())
92+
if question is not None and question != "" and len(gen_questions) > 0:
93+
cosine_sim = self.calculate_similarity(question, gen_questions)
94+
scores.append(cosine_sim.mean())
95+
else:
96+
scores.append(0.0)
9497

9598
return scores
9699

0 commit comments

Comments (0)