Skip to content

Commit

Permalink
Merge pull request #195 from ServiceNow/parsing_failure_handling
Browse files Browse the repository at this point in the history
Retry after bad LLM output
  • Loading branch information
ollmer authored Mar 6, 2025
2 parents 07b660f + 8432668 commit a1116f9
Show file tree
Hide file tree
Showing 2 changed files with 23 additions and 26 deletions.
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[project]
name = "TapeAgents"
version = "0.1.5"
version = "0.1.6"
requires-python = ">= 3.10, <3.13"
description = "TapeAgents framework for building, tuning and evaluating LLM Agents"
authors = [
Expand Down
47 changes: 22 additions & 25 deletions tapeagents/nodes.py
Original file line number Diff line number Diff line change
Expand Up @@ -262,27 +262,26 @@ def generate_steps(
it will yield a SetNextNode step to continue the execution flow.
"""
new_steps = []
try:
cnt = 0
for event in llm_stream:
if event.output:
cnt += 1
if event.output.content:
for step in self.parse_completion(event.output.content, llm_stream.prompt.id):
step = self.postprocess_step(tape, new_steps, step)
new_steps.append(step)
yield step
if self.use_function_calls and event.output.tool_calls:
for tool_call in event.output.tool_calls:
step = self.tool_call_to_step(tool_call)
new_steps.append(step)
yield step
if not cnt:
raise FatalError("No completions!")
except FatalError:
raise

if self.next_node and not isinstance(new_steps[-1] if new_steps else None, StopStep):
for event in llm_stream:
if not event.output:
continue
if event.output.content:
new_steps += list(self.parse_completion(event.output.content))
if event.output.tool_calls and self.use_function_calls:
new_steps += [self.tool_call_to_step(tool_call) for tool_call in event.output.tool_calls]
for i, step in enumerate(new_steps):
yield self.postprocess_step(tape, new_steps[:i], step)
if isinstance(step, LLMOutputParsingFailureAction):
yield SetNextNode(next_node=self.name) # loop to the same node to retry
yield UserStep(content="Try again")
break
if not new_steps:
raise FatalError("No completions!")
if (
self.next_node
and not isinstance(new_steps[-1], StopStep)
and not any(isinstance(step, SetNextNode) for step in new_steps)
):
yield SetNextNode(next_node=self.next_node)

def tool_call_to_step(self, tool_call: ChatCompletionMessageToolCall) -> Step:
Expand Down Expand Up @@ -310,18 +309,17 @@ def postprocess_step(self, tape: Tape, new_steps: list[Step], step: Step) -> Ste
"""
return step

def parse_completion(self, llm_output: str, prompt_id: str) -> Generator[Step, None, None]:
def parse_completion(self, llm_output: str) -> Generator[Step, None, None]:
"""Parse LLM completion output into a sequence of agent steps.
This method processes the LLM output string by parsing it as JSON and validating it against
the agent step class schema. It handles both single step and multi-step outputs.
Args:
llm_output (str): The raw output string from the LLM to be parsed
prompt_id (str): Identifier for the prompt that generated this completion
Yields:
Step: Individual validated agent steps with prompt_id metadata
Step: Individual validated agent steps
LLMOutputParsingFailureAction: Error information if parsing or validation fails
Note:
Expand Down Expand Up @@ -372,7 +370,6 @@ def parse_completion(self, llm_output: str, prompt_id: str) -> Generator[Step, N
yield LLMOutputParsingFailureAction(error=f"Failed to parse LLM output dict: {e}", llm_output=llm_output)
return
for step in steps:
step.metadata.prompt_id = prompt_id
yield step

def extract_code_blocks(self, text: str) -> list[CodeBlock | str]:
Expand Down

0 comments on commit a1116f9

Please sign in to comment.