Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Prompt role blocks debug output + new prompt implementation example #578

Merged
merged 7 commits into from
Jan 11, 2024
4 changes: 3 additions & 1 deletion guidance/library/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,9 +13,11 @@

# context blocks
# context blocks
from ._block import block
# NOTE: the pre-change import line (without indent_roles) was diff residue and
# has been removed; indent_roles is exported by the merged _role.py.
from ._role import role, system, assistant, user, function, instruction, indent_roles
from ._format import monospace
from ._silent import silent
from ._set_var import set_var
from ._set_attribute import set_attribute
# from ..models._model import context_free

# stateless library functions
Expand Down
72 changes: 65 additions & 7 deletions guidance/library/_role.py
Original file line number Diff line number Diff line change
@@ -1,37 +1,95 @@
import guidance
from ._block import block
from ._set_attribute import set_attribute

# NOTE(review): these `<||_..._||>` delimiters appear to be display-control
# markers consumed by the renderer rather than text sent to the model —
# confirm against the display layer. NODISP hides the wrapped text entirely;
# the html span highlights it (used for the non-indented "debug" view).
nodisp_start = "<||_#NODISP_||>"
nodisp_end = "<||_/NODISP_||>"
span_start = "<||_html:<span style='background-color: rgba(255, 180, 0, 0.3); border-radius: 3px;'>_||>"
span_end = "<||_html:</span>_||>"


@guidance
def role_opener(lm, role_name, **kwargs):
    """Open a chat-role block on `lm`.

    Appends the role's start text (from ``lm.get_role_start``) wrapped in
    display-only markers: an indented HTML container when the model's
    ``indent_roles`` attribute is True (the default), or a highlighted
    span when it is False (debug-style output).

    Parameters
    ----------
    lm : Model
        The model state to append to; must come from a chat model.
    role_name : str
        Name of the role being opened (e.g. "system", "user").

    Raises
    ------
    Exception
        If `lm` is not a chat model (has no ``get_role_start``).
    """
    indent = getattr(lm, "indent_roles", True)
    if not hasattr(lm, "get_role_start"):
        # fixed grammar in the message: "in order the use" -> "in order to use"
        raise Exception(
            f"You need to use a chat model in order to use role blocks like `with {role_name}():`! Perhaps you meant to use the {type(lm).__name__}Chat class?"
        )

    # Block start container (centers elements)
    if indent:
        lm += f"<||_html:<div style='display: flex; border-bottom: 1px solid rgba(127, 127, 127, 0.2); justify-content: center; align-items: center;'><div style='flex: 0 0 80px; opacity: 0.5;'>{role_name.lower()}</div><div style='flex-grow: 1; padding: 5px; padding-top: 10px; padding-bottom: 10px; margin-top: 0px; white-space: pre-wrap; margin-bottom: 0px;'>_||>"

    # Start of either debug or HTML no disp block
    if indent:
        lm += nodisp_start
    else:
        lm += span_start

    lm += lm.get_role_start(role_name, **kwargs)

    # End of either debug or HTML no disp block
    if indent:
        lm += nodisp_end
    else:
        lm += span_end

    return lm


@guidance
def role_closer(lm, role_name, **kwargs):
    """Close a chat-role block on `lm`.

    Appends the role's end text (from ``lm.get_role_end``) wrapped in the
    same display-only markers role_opener used, then closes the HTML
    container when indented rendering is active.

    Parameters
    ----------
    lm : Model
        The model state to append to.
    role_name : str
        Name of the role being closed.
    """
    indent = getattr(lm, "indent_roles", True)

    # Start of either debug or HTML no disp block
    if indent:
        lm += nodisp_start
    else:
        lm += span_start

    lm += lm.get_role_end(role_name)

    # End of either debug or HTML no disp block
    if indent:
        lm += nodisp_end
    else:
        lm += span_end

    # End of top container (opened by role_opener when indenting)
    if indent:
        lm += "<||_html:</div></div>_||>"

    return lm


def role(role_name, text=None, **kwargs):
    """Return a context block that wraps its content in the given chat role.

    Parameters
    ----------
    role_name : str
        The chat role to open and close around the block's content.
    text : str, optional
        Inline-text form is not implemented yet; must be None.
    """
    if text is None:
        return block(
            opener=role_opener(role_name, **kwargs),
            closer=role_closer(role_name, **kwargs),
        )
    else:
        # inline text form not implemented yet
        assert False
        # return self.append(open_text + text + close_text)


def system(text=None, **kwargs):
    """Open a `system` role block; see :func:`role`."""
    return role("system", text, **kwargs)


def user(text=None, **kwargs):
    """Open a `user` role block; see :func:`role`."""
    return role("user", text, **kwargs)


def assistant(text=None, **kwargs):
    """Open an `assistant` role block; see :func:`role`."""
    return role("assistant", text, **kwargs)


def function(text=None, **kwargs):
    """Open a `function` role block; see :func:`role`."""
    return role("function", text, **kwargs)


def instruction(text=None, **kwargs):
    """Open an `instruction` role block; see :func:`role`.

    The duplicated return line (a no-trailing-newline diff artifact) has
    been collapsed to a single statement.
    """
    return role("instruction", text, **kwargs)

def indent_roles(indent=True):
    """Context block that sets the model's `indent_roles` attribute,
    toggling between indented HTML role rendering (True) and debug-style
    inline output (False) in role_opener/role_closer."""
    return set_attribute("indent_roles", indent)
21 changes: 21 additions & 0 deletions guidance/library/_set_attribute.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
import guidance
from ._block import block

@guidance
def set_attr_opener(lm, name, value):
    """Set model attribute `name` to `value`, stashing any pre-existing
    value under a ``__save``-prefixed attribute so the matching closer
    can restore it when the block exits."""
    save_slot = "__save" + name
    if hasattr(lm, name):
        lm = lm.setattr(save_slot, getattr(lm, name))
    return lm.setattr(name, value)

@guidance
def set_attr_closer(lm, name):
    """Close a set_attribute block: restore the attribute's saved prior
    value if the opener stashed one, otherwise remove the attribute.

    Bug fix: set_attr_opener stores the saved value as a model
    *attribute* (via ``setattr``), so it must be read back with
    ``getattr`` — the original read ``lm["__save" + name]``, which looks
    up model *variables* instead. NOTE(review): confirm Model's
    ``__getitem__`` does not fall back to attributes before relying on
    the old form.
    """
    save_slot = "__save" + name
    if hasattr(lm, save_slot):
        return lm.setattr(name, getattr(lm, save_slot)).delattr(save_slot)
    else:
        return lm.delattr(name)

def set_attribute(name, value=True):
    """Return a context block that temporarily sets model attribute
    `name` to `value` (default True), restoring the previous state when
    the block exits."""
    return block(opener=set_attr_opener(name, value), closer=set_attr_closer(name))
21 changes: 21 additions & 0 deletions guidance/library/_set_var.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
import guidance
from ._block import block

@guidance
def set_opener(lm, name, value):
    """Set model variable `name` to `value`, saving any existing value
    under a ``__save``-prefixed key so set_closer can put it back."""
    backup_key = "__save" + name
    if name in lm:
        lm = lm.set(backup_key, lm[name])
    return lm.set(name, value)

@guidance
def set_closer(lm, name):
    """Undo set_opener: restore the saved value when one was stashed,
    otherwise drop the variable entirely."""
    backup_key = "__save" + name
    if backup_key not in lm:
        return lm.remove(name)
    return lm.set(name, lm[backup_key]).remove(backup_key)

def set_var(name, value=True):
    """Return a context block that temporarily sets model variable
    `name` to `value` (default True), restoring the prior value — or
    removing the variable — when the block exits.

    The PR review comments that had been spliced into this function's
    body (page-scrape residue) have been removed.
    """
    return block(
        opener=set_opener(name, value),
        closer=set_closer(name),
    )
83 changes: 60 additions & 23 deletions guidance/models/_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -252,31 +252,44 @@ def __add__(self, value):
# inside this context we are free to drop display calls that come too close together
with throttle_refresh():

# close any newly closed contexts
# find what new blocks need to be applied
new_blocks = []
for context in Model.open_blocks:
if context not in lm.opened_blocks:
new_blocks.append(context)

# mark this so we don't re-add when computing the opener or closer (even though we don't know the close text yet)
lm.opened_blocks[context] = (0, "")

# find what old blocks need to be removed
old_blocks = []
for context in list(reversed(lm.opened_blocks)):
if context not in Model.open_blocks and context in lm.opened_blocks:
pos, close_text = lm.opened_blocks[context] # save so we can delete it before adding it
if context.name is not None:
lm._variables[context.name] = format_pattern.sub("", lm._state[pos:])
old_blocks.append((lm.opened_blocks[context], context))

# delete this so we don't re-close when computing the opener or closer
del lm.opened_blocks[context]
lm._inplace_append(close_text)

# close any newly closed contexts
for (pos, close_text), context in old_blocks:
if context.name is not None:
lm._variables[context.name] = format_pattern.sub("", lm._state[pos:])
lm += context.closer

# apply any newly opened contexts (new from this object's perspective)
for context in Model.open_blocks:
if context not in lm.opened_blocks:
lm.opened_blocks[context] = (0, "") # mark this so we don't readd when computing the opener (even though we don't know the close text yet)
lm += context.opener
with grammar_only():
tmp = lm + context.closer
close_text = tmp._state[len(lm._state):] # get the new state added by calling the closer
lm.opened_blocks[context] = (len(lm._state), close_text)

# clear out names that we override
if context.name is not None:
if context.name in lm._variables:
del lm._variables[context.name]
if context.name in lm._variables_log_probs:
del lm._variables_log_probs[context.name]
for context in new_blocks:
lm += context.opener
with grammar_only():
tmp = lm + context.closer
close_text = tmp._state[len(lm._state):] # get the new state added by calling the closer
lm.opened_blocks[context] = (len(lm._state), close_text)

# clear out names that we override
if context.name is not None:
if context.name in lm._variables:
del lm._variables[context.name]
if context.name in lm._variables_log_probs:
del lm._variables_log_probs[context.name]

# wrap raw string values
if isinstance(value, str):
Expand Down Expand Up @@ -367,6 +380,32 @@ def get(self, key, default=None):
The value to return if the variable is not current set.
'''
return self._variables.get(key, default)

def setattr(self, key, value):
    '''Return a new model with the given model attribute set.

    The receiver is not mutated: a copy is made, the attribute is set on
    the copy, and the copy is returned.

    Parameters
    ----------
    key : str
        The name of the attribute to be set.
    value : any
        The value to set the attribute to.
    '''
    new_lm = self.copy()
    # builtin setattr — the method name does not shadow it here, since
    # method names live in the class namespace, not the enclosing scope
    setattr(new_lm, key, value)
    return new_lm

def delattr(self, key):
    '''Return a new model with the given attribute deleted.

    The receiver is not mutated: a copy is made, the attribute is removed
    from the copy, and the copy is returned.

    Parameters
    ----------
    key : str
        The attribute name to remove.
    '''
    new_lm = self.copy()
    # builtin delattr — resolved from builtins, not the class namespace
    delattr(new_lm, key)
    return new_lm

def set(self, key, value):
'''Return a new model with the given variable value set.
Expand Down Expand Up @@ -957,9 +996,7 @@ def __call__(self, grammar, max_tokens=1000000, n=1, top_p=1, temperature=0.0, e
# self._cache_state["new_token_ids"].append(sampled_token_ind)

# capture the named groups from the parse tree
new_captured_data, new_captured_log_prob_data = parser.get_captures()
captured_data.update(new_captured_data)
captured_log_prob_data.update(new_captured_log_prob_data)
parser.get_captures(captured_data, captured_log_prob_data)

# we have no valid log prob data if we didn't compute it
yield new_bytes[hidden_count:], is_generated, new_bytes_prob, captured_data, captured_log_prob_data, token_count - last_token_count
Expand Down
3 changes: 2 additions & 1 deletion guidance/models/transformers/__init__.py
Original file line number Diff line number Diff line change
@@ -1 +1,2 @@
# The duplicated `from ._llama` line was a no-trailing-newline diff artifact;
# the merged file has each import exactly once.
from ._llama import Llama, LlamaChat
from ._transformers import Transformers, TransformersChat
Loading