-
Notifications
You must be signed in to change notification settings - Fork 97
feat: AB-938 - add citations to ask graph block #1272
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change | ||||||||
|---|---|---|---|---|---|---|---|---|---|---|
|
|
@@ -58,7 +58,7 @@ def register(cls, type: str): | |||||||||
| "name": "Add inline graph citations", | ||||||||||
| "type": "Boolean", | ||||||||||
| "desc": "Shows what specific graph sources were used to answer the question.", | ||||||||||
| "default": "yes", | ||||||||||
| "default": "no", | ||||||||||
| "validator": { | ||||||||||
| "type": "boolean", | ||||||||||
| }, | ||||||||||
|
|
@@ -103,7 +103,7 @@ def run(self): | |||||||||
| subqueries = self._get_field( | ||||||||||
| "subqueries", default_field_value="yes") == "yes" | ||||||||||
| graph_citations = self._get_field( | ||||||||||
| "graphCitations", default_field_value="yes") == "yes" | ||||||||||
| "graphCitations", default_field_value="no") == "yes" | ||||||||||
|
|
||||||||||
| response = client.graphs.question( | ||||||||||
| graph_ids=graph_ids, | ||||||||||
|
|
@@ -115,41 +115,42 @@ def run(self): | |||||||||
| } | ||||||||||
| ) | ||||||||||
|
|
||||||||||
| answer_so_far = "" | ||||||||||
| result_dict = {} | ||||||||||
| citations_so_far = [] | ||||||||||
|
|
||||||||||
| if use_streaming: | ||||||||||
| for chunk in response: | ||||||||||
| try: | ||||||||||
| delta_answer = chunk.model_extra.get("answer", "") | ||||||||||
| answer_so_far += delta_answer | ||||||||||
| result_dict["answer"] = answer_so_far | ||||||||||
|
|
||||||||||
| if graph_citations: | ||||||||||
| delta_sources = chunk.model_extra.get("sources", "") | ||||||||||
| citations_so_far.extend(delta_sources) | ||||||||||
| result_dict["citations"] = citations_so_far | ||||||||||
|
|
||||||||||
| self._set_state(state_element, result_dict) | ||||||||||
|
|
||||||||||
| except json.JSONDecodeError: | ||||||||||
| logging.error( | ||||||||||
| "Could not parse stream chunk from graph.question") | ||||||||||
|
|
||||||||||
| else: | ||||||||||
| answer_so_far = response.answer | ||||||||||
| result_dict["answer"] = answer_so_far | ||||||||||
|
|
||||||||||
| if graph_citations: | ||||||||||
| citations_so_far = response.sources or [] | ||||||||||
| result_dict["citations"] = citations_so_far | ||||||||||
|
|
||||||||||
| self._set_state(state_element, result_dict) | ||||||||||
| self.result = answer_so_far | ||||||||||
| self.result = self._parse_response(response, state_element, use_streaming, graph_citations) | ||||||||||
| if state_element: | ||||||||||
| self._set_state(state_element, self.result) | ||||||||||
| self.outcome = "success" | ||||||||||
|
|
||||||||||
| except BaseException as e: | ||||||||||
| self.outcome = "error" | ||||||||||
| raise e | ||||||||||
|
|
||||||||||
|
|
||||||||||
|
|
||||||||||
| def _parse_response(self, response, state_element, use_streaming: bool, graph_citations: bool): | ||||||||||
| if not use_streaming: | ||||||||||
| if graph_citations: | ||||||||||
| return {"answer": response.answer, "citations": response.sources or []} | ||||||||||
| return response.answer | ||||||||||
|
|
||||||||||
| answer = "" | ||||||||||
| citations = [] | ||||||||||
|
|
||||||||||
| for chunk in response: | ||||||||||
| try: | ||||||||||
| delta_answer = chunk.model_extra.get("answer", "") | ||||||||||
| answer += delta_answer | ||||||||||
|
|
||||||||||
| if graph_citations: | ||||||||||
| delta_sources = chunk.model_extra.get("sources", "") | ||||||||||
| citations.extend(delta_sources) | ||||||||||
| self._set_state(state_element, {"answer": answer, "citations": citations}) | ||||||||||
| else: | ||||||||||
| self._set_state(state_element, answer) | ||||||||||
|
|
||||||||||
| except json.JSONDecodeError: | ||||||||||
| logging.error("Could not parse stream chunk from graph.question") | ||||||||||
|
Comment on lines
+150
to
+151
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. The exception handler appears mismatched and should use `logging.exception`. Two issues here:
Suggested fix- except json.JSONDecodeError:
- logging.error("Could not parse stream chunk from graph.question")
+ except Exception:
+     logging.exception("Could not process stream chunk from graph.question")

📝 Committable suggestion
Suggested change
🧰 Tools

🪛 Ruff (0.14.14) — 151-151: Use `logging.exception` instead of `logging.error` (TRY400)

🤖 Prompt for AI Agents
||||||||||
|
|
||||||||||
| if graph_citations: | ||||||||||
| return {"answer": answer, "citations": citations} | ||||||||||
| return answer | ||||||||||
|
|
||||||||||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Removing this breaks the "Use streaming" functionality. The whole purpose of it is to update the state variable as we iterate over the `response`, which is arriving over the network in multiple parts. With it the user can get a more "AI-chatbot-like" experience: instead of waiting for the whole answer to appear at once, they see it being generated.

There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
@UladzislauK-Writer I see, so when streaming is enabled, I should include the set state inside the for loop?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Yes, exactly