
Commit 4584bb2

Convert the syntax to use with blocks, and fix some context block ordering bugs
1 parent b08ee82 commit 4584bb2
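
For orientation, the `with`-block style this commit converts to looks roughly like the sketch below. This is a rough sketch rather than code from the commit: the model id and prompt strings are placeholders, and it assumes a chat-capable model class (the role blocks raise an error otherwise, as seen in `_role.py` below).

```python
from guidance import models, system, user, assistant, gen

# placeholder model id; any chat-capable class (e.g. TransformersChat) should work
lm = models.TransformersChat("some-chat-model")

with system():
    lm += "You are a concise assistant."

with user():
    lm += "What is the capital of France?"

with assistant():
    lm += gen("answer", max_tokens=10)
```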

File tree

7 files changed: +381 -371 lines changed


guidance/library/__init__.py

Lines changed: 2 additions & 1 deletion
```diff
@@ -13,9 +13,10 @@
 
 # context blocks
 from ._block import block
-from ._role import role, system, assistant, user, function, instruction
+from ._role import role, system, assistant, user, function, instruction, indent_roles
 from ._format import monospace
 from ._silent import silent
+from ._set_var import set_var
 # from ..models._model import context_free
 
 # stateless library functions
```

guidance/library/_role.py

Lines changed: 24 additions & 16 deletions
```diff
@@ -1,5 +1,6 @@
 import guidance
 from ._block import block
+from ._set_var import set_var
 
 nodisp_start = "<||_#NODISP_||>"
 nodisp_end = "<||_/NODISP_||>"
@@ -8,50 +9,54 @@
 
 
 @guidance
-def role_opener(lm, role_name, debug=False, **kwargs):
+def role_opener(lm, role_name, **kwargs):
+    indent = lm.get("__role_indent", True)
     if not hasattr(lm, "get_role_start"):
         raise Exception(
             f"You need to use a chat model in order the use role blocks like `with {role_name}():`! Perhaps you meant to use the {type(lm).__name__}Chat class?"
         )
 
     # Block start container (centers elements)
-    lm += f"<||_html:<div style='display: flex; border-bottom: 1px solid rgba(127, 127, 127, 0.2); justify-content: center; align-items: center;'><div style='flex: 0 0 80px; opacity: 0.5;'>{role_name.lower()}</div><div style='flex-grow: 1; padding: 5px; padding-top: 10px; padding-bottom: 10px; margin-top: 0px; white-space: pre-wrap; margin-bottom: 0px;'>_||>"
+    if indent:
+        lm += f"<||_html:<div style='display: flex; border-bottom: 1px solid rgba(127, 127, 127, 0.2); justify-content: center; align-items: center;'><div style='flex: 0 0 80px; opacity: 0.5;'>{role_name.lower()}</div><div style='flex-grow: 1; padding: 5px; padding-top: 10px; padding-bottom: 10px; margin-top: 0px; white-space: pre-wrap; margin-bottom: 0px;'>_||>"
 
     # Start of either debug or HTML no disp block
-    if debug:
-        lm += span_start
-    else:
+    if indent:
         lm += nodisp_start
+    else:
+        lm += span_start
 
     lm += lm.get_role_start(role_name, **kwargs)
 
     # End of either debug or HTML no disp block
-    if debug:
-        lm += span_end
-    else:
+    if indent:
         lm += nodisp_end
+    else:
+        lm += span_end
 
     return lm
 
 
 @guidance
-def role_closer(lm, role_name, debug=False, **kwargs):
+def role_closer(lm, role_name, **kwargs):
+    indent = lm.get("__role_indent", True)
     # Start of either debug or HTML no disp block
-    if debug:
-        lm += span_start
-    else:
+    if indent:
         lm += nodisp_start
+    else:
+        lm += span_start
 
     lm += lm.get_role_end(role_name)
 
     # End of either debug or HTML no disp block
-    if debug:
-        lm += span_end
-    else:
+    if indent:
         lm += nodisp_end
+    else:
+        lm += span_end
 
     # End of top container
-    lm += "<||_html:</div></div>_||>"
+    if indent:
+        lm += "<||_html:</div></div>_||>"
 
     return lm
 
@@ -85,3 +90,6 @@ def function(text=None, **kwargs):
 
 def instruction(text=None, **kwargs):
     return role("instruction", text, **kwargs)
+
+def indent_roles(indent=True):
+    return set_var("__role_indent", indent)
```

guidance/library/_set_var.py

Lines changed: 21 additions & 0 deletions
```diff
@@ -0,0 +1,21 @@
+import guidance
+from ._block import block
+
+@guidance
+def set_opener(lm, name, value):
+    if name in lm:
+        lm = lm.set("__save" + name, lm[name])
+    return lm.set(name, value)
+
+@guidance
+def set_closer(lm, name):
+    if "__save" + name in lm:
+        return lm.set(name, lm["__save" + name]).remove("__save" + name)
+    else:
+        return lm.remove(name)
+
+def set_var(name, value=True):
+    return block(
+        opener=set_opener(name, value),
+        closer=set_closer(name),
+    )
```
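
`set_var` builds on `block()`: the opener stashes any existing value under a `"__save"`-prefixed key before overwriting it, and the closer either restores that saved value or removes the variable entirely, so nested blocks unwind cleanly. A small hypothetical sketch of the intended behaviour; the variable name and model id are made up, and values only take effect once something is appended to `lm` inside the block:

```python
from guidance import models
from guidance.library import set_var

lm = models.TransformersChat("some-chat-model")   # placeholder model id

with set_var("mode", "draft"):
    lm += "..."        # inside the block: lm["mode"] == "draft"
    with set_var("mode", "final"):
        lm += "..."    # inner block overrides: lm["mode"] == "final"
    lm += "..."        # saved value restored: lm["mode"] == "draft"
lm += "..."            # "mode" did not exist before, so it is removed again
```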

guidance/models/_model.py

Lines changed: 34 additions & 23 deletions
```diff
@@ -252,31 +252,44 @@ def __add__(self, value):
         # inside this context we are free to drop display calls that come too close together
         with throttle_refresh():
 
-            # close any newly closed contexts
+            # find what new blocks need to be applied
+            new_blocks = []
+            for context in Model.open_blocks:
+                if context not in lm.opened_blocks:
+                    new_blocks.append(context)
+
+                    # mark this so we don't re-add when computing the opener or closer (even though we don't know the close text yet)
+                    lm.opened_blocks[context] = (0, "")
+
+            # find what old blocks need to be removed
+            old_blocks = []
             for context in list(reversed(lm.opened_blocks)):
                 if context not in Model.open_blocks and context in lm.opened_blocks:
-                    pos, close_text = lm.opened_blocks[context] # save so we can delete it before adding it
-                    if context.name is not None:
-                        lm._variables[context.name] = format_pattern.sub("", lm._state[pos:])
+                    old_blocks.append((lm.opened_blocks[context], context))
+
+                    # delete this so we don't re-close when computing the opener or closer
                     del lm.opened_blocks[context]
-                    lm._inplace_append(close_text)
+
+            # close any newly closed contexts
+            for (pos, close_text), context in old_blocks:
+                if context.name is not None:
+                    lm._variables[context.name] = format_pattern.sub("", lm._state[pos:])
+                lm += context.closer
 
             # apply any newly opened contexts (new from this object's perspective)
-            for context in Model.open_blocks:
-                if context not in lm.opened_blocks:
-                    lm.opened_blocks[context] = (0, "") # mark this so we don't readd when computing the opener (even though we don't know the close text yet)
-                    lm += context.opener
-                    with grammar_only():
-                        tmp = lm + context.closer
-                        close_text = tmp._state[len(lm._state):] # get the new state added by calling the closer
-                        lm.opened_blocks[context] = (len(lm._state), close_text)
-
-                    # clear out names that we override
-                    if context.name is not None:
-                        if context.name in lm._variables:
-                            del lm._variables[context.name]
-                        if context.name in lm._variables_log_probs:
-                            del lm._variables_log_probs[context.name]
+            for context in new_blocks:
+                lm += context.opener
+                with grammar_only():
+                    tmp = lm + context.closer
+                    close_text = tmp._state[len(lm._state):] # get the new state added by calling the closer
+                    lm.opened_blocks[context] = (len(lm._state), close_text)
+
+                # clear out names that we override
+                if context.name is not None:
+                    if context.name in lm._variables:
+                        del lm._variables[context.name]
+                    if context.name in lm._variables_log_probs:
+                        del lm._variables_log_probs[context.name]
 
         # wrap raw string values
         if isinstance(value, str):
@@ -957,9 +970,7 @@ def __call__(self, grammar, max_tokens=1000000, n=1, top_p=1, temperature=0.0, e
             # self._cache_state["new_token_ids"].append(sampled_token_ind)
 
             # capture the named groups from the parse tree
-            new_captured_data, new_captured_log_prob_data = parser.get_captures()
-            captured_data.update(new_captured_data)
-            captured_log_prob_data.update(new_captured_log_prob_data)
+            parser.get_captures(captured_data, captured_log_prob_data)
 
             # we have no valid log prob data if we didn't compute it
             yield new_bytes[hidden_count:], is_generated, new_bytes_prob, captured_data, captured_log_prob_data, token_count - last_token_count
```
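
As I read the first hunk, the ordering fix splits block bookkeeping into a collect phase and an apply phase: newly opened blocks are marked in `lm.opened_blocks` before any opener or closer text is computed, and stale blocks are removed from the bookkeeping before their closers are appended (now via `lm += context.closer` rather than `_inplace_append`), so a closer that itself passes through `__add__` cannot re-open or re-close anything. Below is a stripped-down, hypothetical simulation of that collect-then-apply pattern, with plain strings standing in for real opener/closer grammars; none of these names beyond `opened_blocks`/`open_blocks` come from the commit.

```python
def sync_blocks(opened_blocks, open_blocks, state):
    """Toy model of the collect-then-apply ordering used in Model.__add__ above."""
    # 1) Record newly opened blocks first, so nothing computed below re-adds them.
    new_blocks = [b for b in open_blocks if b not in opened_blocks]
    for b in new_blocks:
        opened_blocks[b] = (0, "")

    # 2) Collect stale blocks and drop their bookkeeping *before* emitting closers,
    #    so they cannot be closed a second time.
    old_blocks = [(opened_blocks.pop(b), b)
                  for b in list(reversed(opened_blocks)) if b not in open_blocks]

    # 3) Only now append closer text for stale blocks, then opener text for new ones.
    for (pos, close_text), b in old_blocks:  # pos is used for named captures in the real code
        state += close_text
    for b in new_blocks:
        opener, closer = f"<{b}>", f"</{b}>"
        state += opener
        opened_blocks[b] = (len(state), closer)
    return state

opened = {}
s = sync_blocks(opened, ["system"], "")  # -> "<system>"
s = sync_blocks(opened, [], s)           # -> "<system></system>"
```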
Lines changed: 2 additions & 1 deletion
```diff
@@ -1 +1,2 @@
-from ._llama import Llama, LlamaChat
+from ._llama import Llama, LlamaChat
+from ._transformers import Transformers, TransformersChat
```
