Adds configurable stop tokens #293

Merged · 3 commits · Feb 15, 2024
Changes from 2 commits
2 changes: 1 addition & 1 deletion nemoguardrails/actions/action_dispatcher.py
@@ -198,8 +198,8 @@ async def execute_action(
                # if the async is not available
                result = fn.run(**params)
            elif isinstance(fn, Runnable):
-               # If it's a Runnable, we invoke it as well
                runnable = fn
+               # If it's a Runnable, we invoke it as well

                result = await runnable.ainvoke(input=params)
            else:
1 change: 1 addition & 0 deletions nemoguardrails/actions/llm/utils.py
@@ -50,6 +50,7 @@ async def llm_call(
    all_callbacks = logging_callbacks

    if isinstance(prompt, str):
+       # stop sinks here
        result = await llm.agenerate_prompt(
            [StringPromptValue(text=prompt)], callbacks=all_callbacks, stop=stop
        )
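For reference, the `stop` argument accepted by `llm_call` is forwarded straight to `llm.agenerate_prompt(..., stop=...)` in the hunk above. A minimal sketch of calling the helper with an explicit stop list — the token values are illustrative, not defaults introduced by this PR:

```python
# Minimal sketch: passing stop tokens to llm_call, which forwards them to the
# underlying llm.agenerate_prompt(..., stop=...) call shown above.
# The token values are illustrative only.
from nemoguardrails.actions.llm.utils import llm_call


async def classify(llm, prompt: str) -> str:
    result = await llm_call(llm, prompt, stop=["<<end>>", "<<stop>>"])
    return result.strip()
```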
3 changes: 2 additions & 1 deletion nemoguardrails/library/self_check/input_check/actions.py
@@ -54,12 +54,13 @@ async def self_check_input(
            "user_input": user_input,
        },
    )
+   stop = llm_task_manager.task_stop(task=Task.SELF_CHECK_INPUT)

    # Initialize the LLMCallInfo object
    llm_call_info_var.set(LLMCallInfo(task=Task.SELF_CHECK_INPUT.value))

    with llm_params(llm, temperature=0.0):
-       check = await llm_call(llm, prompt)
+       check = await llm_call(llm, prompt, stop=stop)

    check = check.lower().strip()
    log.info(f"Input self-checking result is: `{check}`.")
3 changes: 2 additions & 1 deletion nemoguardrails/library/self_check/output_check/actions.py
@@ -57,12 +57,13 @@ async def self_check_output(
            "bot_response": bot_response,
        },
    )
+   stop = llm_task_manager.task_stop(task=Task.SELF_CHECK_OUTPUT)

    # Initialize the LLMCallInfo object
    llm_call_info_var.set(LLMCallInfo(task=Task.SELF_CHECK_OUTPUT.value))

    with llm_params(llm, temperature=0.0):
-       response = await llm_call(llm, prompt)
+       response = await llm_call(llm, prompt, stop=stop)

    response = response.lower().strip()
    log.info(f"Output self-checking result is: `{response}`.")
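Both self-check actions now follow the same pattern: look up the task's configured stop tokens and pass them to `llm_call`. A sketch of how a hypothetical custom action could reuse it — the action name and `task` argument are illustrative, and the import paths are assumed to match the files touched in this PR:

```python
# Sketch of the lookup-and-forward pattern above, in a hypothetical custom action.
from nemoguardrails.actions.llm.utils import llm_call
from nemoguardrails.llm.params import llm_params  # assumed path of the llm_params helper used above


async def my_check(llm, llm_task_manager, prompt, task):
    # None if the task's prompt defines no stop tokens.
    stop = llm_task_manager.task_stop(task=task)
    with llm_params(llm, temperature=0.0):
        result = await llm_call(llm, prompt, stop=stop)
    return result.lower().strip()
```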
5 changes: 5 additions & 0 deletions nemoguardrails/llm/taskmanager.py
@@ -255,6 +255,11 @@ def parse_task_output(self, task: Task, output: str):
        else:
            return output

+   def task_stop(self, task: Task) -> List[str]:
"""Return the stop sequence for the given task."""
prompt = get_prompt(self.config, task)
return prompt.stop

def register_filter(self, filter_fn: callable, name: Optional[str] = None):
"""Register a custom filter for the rails configuration."""
name = name or filter_fn.__name__
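With `task_stop` in place, callers can read the per-task stop sequence without touching the prompt objects directly. A small sketch, assuming a config loaded from a directory (the path is a placeholder):

```python
# Sketch: reading the configured stop tokens for a task via the new helper.
from nemoguardrails import RailsConfig
from nemoguardrails.llm.taskmanager import LLMTaskManager
from nemoguardrails.llm.types import Task  # assumed location of the Task enum

config = RailsConfig.from_path("./config")  # placeholder config directory
llm_task_manager = LLMTaskManager(config)

stop = llm_task_manager.task_stop(task=Task.SELF_CHECK_INPUT)
# `stop` is the list declared under the task's prompt, or None when not configured.
```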
4 changes: 4 additions & 0 deletions nemoguardrails/rails/llm/config.py
@@ -137,6 +137,10 @@ class TaskPrompt(BaseModel):
        default=_default_config["prompting_mode"],
        description="Corresponds to the `prompting_mode` for which this prompt is fetched. Default is 'standard'.",
    )
+   stop: Optional[List[str]] = Field(
+       default=None,
+       description="If specified, will configure stop tokens for models that support this.",
+   )

    @root_validator(pre=True, allow_reuse=True)
    def check_fields(cls, values):
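On the configuration side, the new `stop` field means a prompt override can declare stop tokens directly in YAML (normally in a config's `prompts.yml`). An illustrative override, mirroring the test added below — the task, tokens, and prompt text are examples only:

```python
# Illustrative config declaring stop tokens for the self_check_input prompt.
import textwrap

from nemoguardrails import RailsConfig

config = RailsConfig.from_content(
    yaml_content=textwrap.dedent(
        """
        models:
          - type: main
            engine: openai
            model: gpt-3.5-turbo-instruct
        prompts:
          - task: self_check_input
            stop:
              - "<<end>>"
            content: |-
              Is the user message below safe? Answer Yes or No.
              "{{ user_input }}"
        """
    )
)
# At runtime, self_check_input picks these tokens up via llm_task_manager.task_stop(...).
```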
52 changes: 52 additions & 0 deletions tests/test_llm_task_manager.py
@@ -222,3 +222,55 @@ def test_prompt_length_exceeded_compressed_history():
        events=events,
    )
    assert len(generate_user_intent_prompt) <= max_task_prompt_length
+
+
+# Test to check the stop configuration parameter
+def test_stop_configuration_parameter():
"""Test the prompts for the OpenAI GPT-3 5 Turbo model."""
+   config = RailsConfig.from_content(
+       yaml_content=textwrap.dedent(
+           """
+           models:
+             - type: main
+               engine: openai
+               model: gpt-3.5-turbo-instruct
+           prompts:
+             - task: generate_user_intent
+               stop:
+                 - <<end>>
+                 - <<stop>>
+               max_length: 3000
+               content: |-
+                 {{ general_instructions }}
+
+                 # This is how a conversation between a user and the bot can go:
+                 {{ sample_conversation }}
+
+                 # This is how the user talks:
+                 {{ examples }}
+
+                 # This is the current conversation between the user and the bot:
+                 {{ sample_conversation | first_turns(2) }}
+                 {{ history | colang }}
+           """
+       )
+   )
+
+   task_prompt = get_prompt(config, Task.GENERATE_USER_INTENT)
+
+   # Assuming the stop parameter is a list of strings
+   expected_stop_tokens = ["<<end>>", "<<stop>>"]
+   llm_task_manager = LLMTaskManager(config)
+
+   # Render the task prompt with the stop configuration
+   rendered_prompt = llm_task_manager.render_task_prompt(
+       task=Task.GENERATE_USER_INTENT,
+       context={"examples": 'user "Hello there!"\n express greeting'},
+       events=[],
+   )
+
+   # Check that the stop tokens are correctly set on the task prompt
+   for stop_token in expected_stop_tokens:
+       assert stop_token in task_prompt.stop
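A possible follow-up assertion (not part of this change set) would exercise the new `task_stop` helper directly in the same test:

```python
# Hypothetical extra check for the test above: the task manager helper added in
# taskmanager.py should surface the same configured tokens.
assert llm_task_manager.task_stop(task=Task.GENERATE_USER_INTENT) == expected_stop_tokens
```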