
Commit 54a1eb0

consistent import of cleandoc and addressed comments
Signed-off-by: Amit Sharma <[email protected]>
1 parent c24ffa7 commit 54a1eb0

File tree

3 files changed: +25 -26 lines changed


pywhyllm/suggesters/identification_suggester.py

Lines changed: 5 additions & 5 deletions
@@ -4,7 +4,7 @@
 import guidance
 from guidance import system, user, assistant, gen
 import re
-import inspect
+from inspect import cleandoc
 
 class IdentificationSuggester(IdentifierProtocol):
 CONTEXT: str = """causal mechanisms"""
@@ -204,7 +204,7 @@ def request_mediators(
 influencing athe {outcome}. Where a causal model is a conceptual model that describes the causal mechanisms
 of a system. You will do this by by answering questions about cause and effect and using your domain knowledge
 in {domain_expertise}. Follow the next two steps, and complete the first one before moving on to the second:"""
-lm += inspect.cleandoc(prompt_str)
+lm += cleandoc(prompt_str)
 
 with user():
 prompt_str = f"""(1) From your perspective as an expert in {domain_expertise}, think step by step as you consider the factors
@@ -224,7 +224,7 @@ def request_mediators(
 the tags <mediating_factor>factor_name</mediating_factor>. Where factor_name is one of the items within the
 factor_names list. If a factor does not have a high likelihood of mediating, then do not wrap the factor with
 any tags. Your step by step answer as an in {domain_expertise}:"""
-lm += inspect.cleandoc(prompt_str)
+lm += cleandoc(prompt_str)
 
 with assistant():
 lm += gen("output")
@@ -322,7 +322,7 @@ def request_ivs(
 causal mechanisms of a system. You will do this by by answering questions about cause and effect and using
 your domain knowledge in {domain_expertise}. Follow the next two steps, and complete the first one before
 moving on to the second:"""
-lm += inspect.cleandoc(prompt_str)
+lm += cleandoc(prompt_str)
 
 with user():
 prompt_str = f"""(1) From your perspective as an expert in {domain_expertise}, think step by step
@@ -344,7 +344,7 @@ def request_ivs(
 Where factor_name is one of the items within the factor_names list. If a factor does not have a high
 likelihood of being an instrumental variable, then do not wrap the factor with any tags. Your step by step
 answer as an in {domain_expertise}:"""
-lm += inspect.cleandoc(prompt_str)
+lm += cleandoc(prompt_str)
 with assistant():
 lm += gen("output")
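For reference, the change in this file is purely an import-style cleanup: inspect.cleandoc and a directly imported cleandoc resolve to the same standard-library function, so the generated prompts are unchanged. A minimal standalone sketch (not from the repo) illustrating the equivalence:

    import inspect
    from inspect import cleandoc

    prompt_str = """Think step by step as you consider the factors.
        Wrap your answer within the tags <answer></answer>."""

    # Both forms call the same stdlib helper; only the import style differs.
    assert inspect.cleandoc(prompt_str) == cleandoc(prompt_str)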

pywhyllm/suggesters/model_suggester.py

Lines changed: 10 additions & 10 deletions
@@ -5,7 +5,7 @@
 from guidance import system, user, assistant, gen
 from ..helpers import RelationshipStrategy
 import re
-import inspect
+from inspect import cleandoc
 
 class ModelSuggester(ModelerProtocol):
 CONTEXT: str = """causal mechanisms"""
@@ -39,7 +39,7 @@ def suggest_domain_expertises(
 such factors? Think about this in a step by step manner and recommend {n_experts} expertises and
 provide each one wrapped within the tags, <domain_expertise></domain_expertise>, along with the
 reasoning and explanation wrapped between the tags <explanation></explanation>."""
-lm += inspect.cleandoc(prompt_str)
+lm += cleandoc(prompt_str)
 with assistant():
 lm += gen("output")
 
@@ -82,7 +82,7 @@ def suggest_domain_experts(
 about this in a step by step manner and recommend {n_experts} domain experts and provide each one
 wrapped within the tags, <domain_expert></domain_expert>, along with the reasoning and explanation
 wrapped between the tags <explanation></explanation>."""
-lm += inspect.cleandoc(prompt_str)
+lm += cleandoc(prompt_str)
 with assistant():
 lm += gen("output")
 
@@ -126,7 +126,7 @@ def suggest_stakeholders(
 this in a step by step manner and recommend {n_stakeholders} stakeholders. Then provide each useful stakeholder
 wrapped within the tags, <stakeholder></stakeholder>, along with the reasoning and explanation wrapped between the tags
 <explanation></explanation>."""
-lm += inspect.cleandoc(prompt_str)
+lm += cleandoc(prompt_str)
 with assistant():
 lm += gen("output")
 
@@ -212,7 +212,7 @@ def request_confounders(
 analysis_context}. Where a causal model is a conceptual model that describes the causal mechanisms of a
 system. You
 will do this by answering questions about cause and effect and using your domain knowledge in {domain_expertise}."""
-lm += inspect.cleandoc(prompt_str)
+lm += cleandoc(prompt_str)
 with user():
 prompt_str = f"""Follow the next two steps, and complete the first one before moving on to the second: (1)
 From your perspective as an
@@ -234,7 +234,7 @@ def request_confounders(
 <confounding_factor>factor_name</confounding_factor> where
 factor_name is one of the items within the factor_names list. If a factor does not have a high likelihood of directly
 confounding, then do not wrap the factor with any tags."""
-lm += inspect.cleandoc(prompt_str)
+lm += cleandoc(prompt_str)
 with assistant():
 lm += gen("output")
 
@@ -311,7 +311,7 @@ def suggest_parents(
 {factor},
 then do not wrap the factor with any tags. Your answer as an expert in
 {domain_expertise}:"""
-lm += inspect.cleandoc(prompt_str)
+lm += cleandoc(prompt_str)
 
 with assistant():
 lm += gen("output")
@@ -383,7 +383,7 @@ def suggest_children(
 factor_names list. If a factor does not have a high likelihood of directly influencing and causing the {
 factor}, then do not wrap the factor with any tags. Your answer as an expert in
 {domain_expertise}:"""
-lm += inspect.cleandoc(prompt_str)
+lm += cleandoc(prompt_str)
 with assistant():
 lm += gen("output")
 
@@ -424,7 +424,7 @@ def suggest_pairwise_relationship(
 mechanisms of a system. You will do this by by answering questions about cause and effect and using your
 domain
 knowledge as an expert in {domain_expertise}."""
-lm += inspect.cleandoc(prompt_str)
+lm += cleandoc(prompt_str)
 with user():
 prompt_str = f"""From your perspective as an expert in {domain_expertise}, which of the following is
 most likely true? (A) {factor_a} affects {factor_b}; {factor_a} has a high likelihood of directly
@@ -436,7 +436,7 @@ def suggest_pairwise_relationship(
 you reach a conclusion, wrap your answer within the tags <answer></answer>. If you are done thinking, provide your
 answer wrapped within the tags <answer></answer>. e.g. <answer>A</answer>, <answer>B</answer>, or <answer>C</answer>.
 Your answer as an expert in {domain_expertise}:"""
-lm += inspect.cleandoc(prompt_str)
+lm += cleandoc(prompt_str)
 
 with assistant():
 lm += gen("output")

pywhyllm/suggesters/tuebingen_model_suggester.py

Lines changed: 10 additions & 11 deletions
@@ -120,17 +120,17 @@ def _build_description_program(self, variable, use_context=False, ask_reference=
 query["user"] = cleandoc(user_prompt)
 
 if ask_reference:
-query["user"] += " " # cleandoc removes leading whitespace, so need to add separately
 user_prompt = f"""Then provide two research papers that support your description.
 Let's think step-by-step to make sure that we have a proper and clear description. Then provide your final
-answer within the tags, <description></description>, and each research paper within the tags <reference></reference>."""
-query["user"] += cleandoc(user_prompt)
+answer within the tags, <description></description>, and each research paper within the tags <reference></reference>."""
+# cleandoc removes leading whitespace, so need to add separately
+query["user"] += " " + cleandoc(user_prompt)
 
 else:
-query["user"] += "\n" # cleandoc removes leading whitespace, so need to add separately
 user_prompt = f"""Let's think step-by-step to make sure that we have a proper and clear description. Then provide your final
 answer within the tags, <description></description>."""
-query["user"] += cleandoc(user_prompt)
+# cleandoc removes leading whitespace, so need to add separately
+query["user"] += "\n" + cleandoc(user_prompt)
 
 else:
 sys_prompt = f"""You are a helpful assistant for writing concise and peer-reviewed descriptions. Your goal
@@ -141,17 +141,16 @@ def _build_description_program(self, variable, use_context=False, ask_reference=
 query["user"] = cleandoc(user_prompt)
 
 if ask_reference:
-query["user"] += "\n" # cleandoc removes leading whitespace, so need to add separately
 user_prompt = f"""Then provide two research papers that support your description.
 Let's think step-by-step to make sure that we have a proper and clear description. Then provide
 your final answer within the tags, <description></description>, and each research paper within the
 tags <paper></paper>."""
-query["user"] += cleandoc(user_prompt)
+# cleandoc removes leading whitespace, so need to add separately
+query["user"] += "\n" + cleandoc(user_prompt)
 else:
-query["user"] += "\n" # cleandoc removes leading whitespace, so need to add separately
 user_prompt = f"""Let's think step-by-step to make sure that we have a proper and clear description. Then provide
 your final answer within the tags, <description></description>."""
-query["user"] += cleandoc(user_prompt)
+query["user"] += "\n" + cleandoc(user_prompt)
 return query
 
 def suggest_relationship(
@@ -258,10 +257,10 @@ def _build_relationship_program(
 if use_description:
 user_prompt = f"""can changing {variable_a}, where {description_a}, change {variable_b}, where
 {description_b}? Answer Yes or No."""
-query["user"] += cleandoc(user_prompt)
+query["user"] = cleandoc(user_prompt)
 else:
 user_prompt = f"""can changing {variable_a} change {variable_b}? Answer Yes or No."""
-query["user"] += cleandoc(user_prompt)
+query["user"] = cleandoc(user_prompt)
 
 if ask_reference:
 user_prompt = f"""At each step, each expert include a reference to a research paper that supports
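The restructuring in this file relies on the behavior noted in the repeated comment: inspect.cleandoc strips leading whitespace from the first line, removes the common indentation of the remaining lines, and drops blank lines at the start and end, so any separator between prompt fragments has to be concatenated explicitly outside the call. A short standalone sketch of that behavior (variable names here are illustrative, not from the repo):

    from inspect import cleandoc

    base = "Describe the variable in one sentence."
    fragment = """
        Let's think step-by-step to make sure that we have a proper and clear description.
        Then provide your final answer within the tags, <description></description>."""

    # cleandoc drops the leading blank line and the common indentation,
    # so the joining newline must be added explicitly, as the diff above does.
    combined = base + "\n" + cleandoc(fragment)
    print(combined)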
