|
| 1 | +# SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]> |
| 2 | +# |
| 3 | +# SPDX-License-Identifier: Apache-2.0 |
| 4 | + |
| 5 | +from dataclasses import replace |
| 6 | +from typing import Any, Optional, Union |
| 7 | + |
| 8 | +from haystack import component |
| 9 | +from haystack.components.generators.chat.openai import OpenAIChatGenerator as BaseOpenAIChatGenerator |
| 10 | +from haystack.dataclasses import ChatMessage, StreamingCallbackT |
| 11 | +from haystack.tools import Tool, Toolset |
| 12 | + |
| 13 | +from haystack_experimental.utils.hallucination_risk_calculator.dataclasses import HallucinationScoreConfig |
| 14 | +from haystack_experimental.utils.hallucination_risk_calculator.openai_planner import calculate_hallucination_metrics |
| 15 | + |
| 16 | + |
@component
class OpenAIChatGenerator(BaseOpenAIChatGenerator):
    """
    An OpenAI chat-based text generator component that supports hallucination risk scoring.

    This is based on the paper
    [LLMs are Bayesian, in Expectation, not in Realization](https://arxiv.org/abs/2507.11768).

    ## Usage Example:

    ```python
    from haystack.dataclasses import ChatMessage

    from haystack_experimental.utils.hallucination_risk_calculator.dataclasses import HallucinationScoreConfig
    from haystack_experimental.components.generators.chat.openai import OpenAIChatGenerator

    # Evidence-based Example
    llm = OpenAIChatGenerator(model="gpt-4o")
    rag_result = llm.run(
        messages=[
            ChatMessage.from_user(
                text="Task: Answer strictly based on the evidence provided below.\n"
                "Question: Who won the Nobel Prize in Physics in 2019?\n"
                "Evidence:\n"
                "- Nobel Prize press release (2019): James Peebles (1/2); Michel Mayor & Didier Queloz (1/2).\n"
                "Constraints: If evidence is insufficient or conflicting, refuse."
            )
        ],
        hallucination_score_config=HallucinationScoreConfig(skeleton_policy="evidence_erase"),
    )
    print(f"Decision: {rag_result['replies'][0].meta['hallucination_decision']}")
    print(f"Risk bound: {rag_result['replies'][0].meta['hallucination_risk']:.3f}")
    print(f"Rationale: {rag_result['replies'][0].meta['hallucination_rationale']}")
    print(f"Answer:\n{rag_result['replies'][0].text}")
    print("---")
    ```
    """

    def _annotate_with_hallucination_metrics(
        self,
        messages: list[ChatMessage],
        replies: list[ChatMessage],
        hallucination_score_config: Optional[HallucinationScoreConfig],
    ) -> list[ChatMessage]:
        """
        Attach hallucination metrics to the metadata of each reply, if scoring is configured.

        Scoring is only performed when a config is provided and the last input message carries
        text (that text is used as the prompt to analyze); otherwise the replies are returned
        unchanged. Shared by `run` and `run_async` so the two code paths cannot drift apart.

        :param messages: The input messages that produced the replies. Must be non-empty.
        :param replies: The generated replies to annotate.
        :param hallucination_score_config: Scoring configuration, or None to skip scoring.
        :returns: The replies, each carrying merged hallucination metadata when scoring ran.
        """
        if not (hallucination_score_config and messages[-1].text):
            return replies
        # NOTE: scoring issues additional model calls (multiple samples), so it adds
        # latency and cost on top of the main generation. It is synchronous even on the
        # async path, because no async variant of the planner is available here.
        hallucination_meta = calculate_hallucination_metrics(
            prompt=messages[-1].text,
            hallucination_score_config=hallucination_score_config,
            chat_generator=self,
        )
        # ChatMessage stores its metadata in the `_meta` dataclass field; build annotated
        # copies rather than mutating the originals, merging metrics over existing meta.
        return [replace(m, _meta={**m.meta, **hallucination_meta}) for m in replies]

    @component.output_types(replies=list[ChatMessage])
    def run(
        self,
        messages: list[ChatMessage],
        streaming_callback: Optional[StreamingCallbackT] = None,
        generation_kwargs: Optional[dict[str, Any]] = None,
        *,
        tools: Optional[Union[list[Tool], Toolset]] = None,
        tools_strict: Optional[bool] = None,
        hallucination_score_config: Optional[HallucinationScoreConfig] = None,
    ) -> dict[str, list[ChatMessage]]:
        """
        Invokes chat completion based on the provided messages and generation parameters.

        :param messages:
            A list of ChatMessage instances representing the input messages.
        :param streaming_callback:
            A callback function that is called when a new token is received from the stream.
        :param generation_kwargs:
            Additional keyword arguments for text generation. These parameters will
            override the parameters passed during component initialization.
            For details on OpenAI API parameters, see [OpenAI documentation](https://platform.openai.com/docs/api-reference/chat/create).
        :param tools:
            A list of tools or a Toolset for which the model can prepare calls. If set, it will override the
            `tools` parameter set during component initialization. This parameter can accept either a list of
            `Tool` objects or a `Toolset` instance.
        :param tools_strict:
            Whether to enable strict schema adherence for tool calls. If set to `True`, the model will follow exactly
            the schema provided in the `parameters` field of the tool definition, but this may increase latency.
            If set, it will override the `tools_strict` parameter set during component initialization.
        :param hallucination_score_config:
            If provided, the generator will evaluate the hallucination risk of its responses using
            the OpenAIPlanner and annotate each response with hallucination metrics.
            This involves generating multiple samples and analyzing their consistency, which may increase
            latency and cost. Use this option when you need to assess the reliability of the generated content
            in scenarios where accuracy is critical.
            For details, see the [research paper](https://arxiv.org/abs/2507.11768)

        :returns:
            A dictionary with the following key:
            - `replies`: A list containing the generated responses as ChatMessage instances. If hallucination
              scoring is enabled, each message will include additional metadata:
              - `hallucination_decision`: "ANSWER" if the model decided to answer, "REFUSE" if it abstained.
              - `hallucination_risk`: The EDFL hallucination risk bound.
              - `hallucination_rationale`: The rationale behind the hallucination decision.
        """
        if not messages:
            return {"replies": []}

        # Delegate generation to the base OpenAI generator, then optionally score.
        result = super(OpenAIChatGenerator, self).run(
            messages=messages,
            streaming_callback=streaming_callback,
            generation_kwargs=generation_kwargs,
            tools=tools,
            tools_strict=tools_strict,
        )
        replies = self._annotate_with_hallucination_metrics(
            messages=messages,
            replies=result["replies"],
            hallucination_score_config=hallucination_score_config,
        )
        return {"replies": replies}

    @component.output_types(replies=list[ChatMessage])
    async def run_async(
        self,
        messages: list[ChatMessage],
        streaming_callback: Optional[StreamingCallbackT] = None,
        generation_kwargs: Optional[dict[str, Any]] = None,
        *,
        tools: Optional[Union[list[Tool], Toolset]] = None,
        tools_strict: Optional[bool] = None,
        hallucination_score_config: Optional[HallucinationScoreConfig] = None,
    ) -> dict[str, list[ChatMessage]]:
        """
        Asynchronously invokes chat completion based on the provided messages and generation parameters.

        This is the asynchronous version of the `run` method. It has the same parameters and return values
        but can be used with `await` in async code.

        :param messages:
            A list of ChatMessage instances representing the input messages.
        :param streaming_callback:
            A callback function that is called when a new token is received from the stream.
            Must be a coroutine.
        :param generation_kwargs:
            Additional keyword arguments for text generation. These parameters will
            override the parameters passed during component initialization.
            For details on OpenAI API parameters, see [OpenAI documentation](https://platform.openai.com/docs/api-reference/chat/create).
        :param tools:
            A list of tools or a Toolset for which the model can prepare calls. If set, it will override the
            `tools` parameter set during component initialization. This parameter can accept either a list of
            `Tool` objects or a `Toolset` instance.
        :param tools_strict:
            Whether to enable strict schema adherence for tool calls. If set to `True`, the model will follow exactly
            the schema provided in the `parameters` field of the tool definition, but this may increase latency.
            If set, it will override the `tools_strict` parameter set during component initialization.
        :param hallucination_score_config:
            If provided, the generator will evaluate the hallucination risk of its responses using
            the OpenAIPlanner and annotate each response with hallucination metrics.
            This involves generating multiple samples and analyzing their consistency, which may increase
            latency and cost. Use this option when you need to assess the reliability of the generated content
            in scenarios where accuracy is critical.
            For details, see the [research paper](https://arxiv.org/abs/2507.11768)

        :returns:
            A dictionary with the following key:
            - `replies`: A list containing the generated responses as ChatMessage instances. If hallucination
              scoring is enabled, each message will include additional metadata:
              - `hallucination_decision`: "ANSWER" if the model decided to answer, "REFUSE" if it abstained.
              - `hallucination_risk`: The EDFL hallucination risk bound.
              - `hallucination_rationale`: The rationale behind the hallucination decision.
        """
        if not messages:
            return {"replies": []}

        # Delegate generation to the base OpenAI generator, then optionally score.
        # NOTE(review): hallucination scoring itself is synchronous and will block the
        # event loop while it runs — confirm this is acceptable for async callers.
        result = await super(OpenAIChatGenerator, self).run_async(
            messages=messages,
            streaming_callback=streaming_callback,
            generation_kwargs=generation_kwargs,
            tools=tools,
            tools_strict=tools_strict,
        )
        replies = self._annotate_with_hallucination_metrics(
            messages=messages,
            replies=result["replies"],
            hallucination_score_config=hallucination_score_config,
        )
        return {"replies": replies}