Commit f3b27fc

⚡ Small cleanups
Signed-off-by: Gaurav-Kumbhat <Gaurav.Kumbhat@ibm.com>
1 parent 865f3e9 commit f3b27fc

2 files changed (+4, -3 lines)

vllm_detector_adapter/generative_detectors/llama_guard.py

Lines changed: 1 addition & 1 deletion
@@ -54,7 +54,7 @@ def __post_process_results(self, results):
                 new_scores.append(scores[i])
 
             # Fetch categories as the last line in the response available in csv format
-            for category in content.strip().split("\n")[-1].split(","):
+            for category in content.splitlines()[-1].split(","):
                 category_choice = copy.deepcopy(choice)
                 category_choice.message.content = category
                 new_choices.append(category_choice)
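
Why splitlines() is the tidier call here: strip().split("\n") only works because strip() first removes the trailing newline, whereas splitlines() drops it on its own and also copes with "\r\n" endings. A minimal sketch; the content value below is a made-up Llama Guard style reply, not taken from the repository:

    content = "unsafe\nS1,S2\n"  # hypothetical model reply ending in a newline

    # Old call: without strip(), the trailing "\n" makes split("\n")
    # end with an empty string rather than the CSV category line.
    assert content.strip().split("\n")[-1] == "S1,S2"
    assert content.split("\n")[-1] == ""

    # New call: splitlines() ignores the trailing newline and also
    # handles "\r\n" line endings, so the strip() can be dropped.
    assert content.splitlines()[-1] == "S1,S2"
    assert "unsafe\r\nS1,S2\r\n".splitlines()[-1] == "S1,S2"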

vllm_detector_adapter/protocol.py

Lines changed: 3 additions & 2 deletions
@@ -63,13 +63,14 @@ def from_chat_completion_response(results, contents: List[str]):
         for content_idx, (responses, scores, detection_type) in enumerate(results):
 
             detection_responses = []
+            start = 0
+            end = len(contents[content_idx])
+
             for i, choice in enumerate(responses.choices):
                 content = choice.message.content
                 # NOTE: for providing spans, we currently consider entire generated text as a span.
                 # This is because, at the time of writing, the generative guardrail models do not
                 # provide specific information about input text, which can be used to deduce spans.
-                start = 0
-                end = len(contents[content_idx])
                 if content and isinstance(content, str):
                     response_object = ContentsDetectionResponseObject(
                         detection_type=detection_type,
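
The protocol.py change is loop-invariant hoisting: start and end depend only on content_idx, not on the inner choice loop, so they are now computed once per content entry rather than once per choice. A minimal sketch of the pattern, using made-up stand-in data instead of the adapter's real response objects:

    contents = ["first input text", "second input"]
    all_choices = [["unsafe", "safe"], ["safe"]]  # hypothetical choices per content

    for content_idx, choices in enumerate(all_choices):
        # Loop-invariant: the span always covers the whole input text,
        # so compute it once per content entry, not once per choice.
        start = 0
        end = len(contents[content_idx])
        for choice in choices:
            print(choice, (start, end))  # each choice reports the same full-text span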
