Skip to content

Commit

Permalink
Refactor classifier functions to include conversation context and improve topic extraction handling
Browse files Browse the repository at this point in the history
  • Loading branch information
Luisotee committed Dec 10, 2024
1 parent 626084a commit 4ee6758
Show file tree
Hide file tree
Showing 2 changed files with 68 additions and 55 deletions.
54 changes: 27 additions & 27 deletions apps/ai_api/eda_ai_api/api/routes/classifier.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,58 +31,58 @@
)


async def extract_topics(message: str, history: list) -> list[str]:
    """Extract up to 5 grant-research topics from *message* using prior turns.

    Args:
        message: The latest user message.
        history: Prior conversation turns; each item is a dict with
            ``role`` and ``content`` keys (assumed from usage — TODO confirm
            against the Zep conversation store's return shape).

    Returns:
        At most 5 trimmed topic strings, or ``["INSUFFICIENT_CONTEXT"]``
        when the LLM signals (or produces) nothing usable.
    """
    # Flatten history into a "role: content" transcript for the prompt.
    context = "\n".join(f"{msg['role']}: {msg['content']}" for msg in history)
    response = llm.complete(TOPIC_TEMPLATE.format(context=context, message=message))

    # The prompt instructs the model to emit this exact sentinel when it
    # cannot determine topics; propagate it as a one-element list so the
    # caller can branch on it.
    if response.text.strip() == "INSUFFICIENT_CONTEXT":
        return ["INSUFFICIENT_CONTEXT"]

    # Comma-separated list expected; drop empties, cap at 5.
    topics = [t.strip() for t in response.text.split(",") if t.strip()][:5]
    # An empty parse is treated the same as the explicit sentinel.
    return topics if topics else ["INSUFFICIENT_CONTEXT"]


async def extract_proposal_details(message: str, history: list) -> tuple[str, str]:
    """Extract (community_project, grant_call) from *message* with context.

    Args:
        message: The latest user message.
        history: Prior conversation turns as dicts with ``role`` and
            ``content`` keys.

    Returns:
        ``(community_project, grant_call)``; either element is the literal
        string ``"unknown"`` when the LLM could not determine it (the prompt
        instructs the model to use that placeholder).
    """
    # Flatten history into a "role: content" transcript for the prompt.
    context = "\n".join(f"{msg['role']}: {msg['content']}" for msg in history)
    response = llm.complete(PROPOSAL_TEMPLATE.format(context=context, message=message))

    # Expected response format: "project_name|grant_name".
    parts = response.text.split("|")
    # str.split always yields at least one element, so parts[0] is safe.
    community_project = parts[0].strip()
    grant_call = parts[1].strip() if len(parts) > 1 else "unknown"
    return community_project, grant_call


async def process_decision(
    decision: str, message: str, history: list
) -> Dict[str, Any]:
    """Dispatch *message* to the handler selected by *decision*.

    Args:
        decision: Routing label produced by the classifier LLM; visible
            branches handle ``"discovery"``, ``"proposal"`` and
            ``"heartbeat"``.
        message: The combined user message being processed.
        history: Prior conversation turns as dicts with ``role`` and
            ``content`` keys, forwarded to the extractors and prompts.

    Returns:
        A dict response for heartbeat/insufficient-context paths, or the
        kickoff result of the selected crew.
    """
    logger.info(f"Processing decision: {decision} for message: {message}")

    if decision == "discovery":
        topics = await extract_topics(message, history)
        logger.info(f"Extracted topics: {topics}")

        # Sentinel from extract_topics: ask the user for more detail
        # instead of launching the crew.
        if topics == ["INSUFFICIENT_CONTEXT"]:
            context = "\n".join(
                f"{msg['role']}: {msg['content']}" for msg in history
            )
            response = llm.complete(
                INSUFFICIENT_TEMPLATES["discovery"].format(
                    context=context, message=message
                )
            )
            return {"response": response.text}

        return (
            OpportunityFinderCrew().crew().kickoff(inputs={"topics": ", ".join(topics)})
        )

    elif decision == "proposal":
        community_project, grant_call = await extract_proposal_details(message, history)
        logger.info(f"Project: {community_project}, Grant: {grant_call}")

        # "unknown" placeholders mean the LLM could not extract the
        # details; ask the user for them instead of drafting a proposal.
        if community_project == "unknown" or grant_call == "unknown":
            context = "\n".join(
                f"{msg['role']}: {msg['content']}" for msg in history
            )
            response = llm.complete(
                INSUFFICIENT_TEMPLATES["proposal"].format(
                    context=context, message=message
                )
            )
            return {"response": response.text}

        return (
            ProposalWriterCrew(
                community_project=community_project, grant_call=grant_call
            )
            .crew()
            .kickoff()
        )

    elif decision == "heartbeat":
        return {"is_alive": True}
    # NOTE(review): no branch visible for any other decision value — the
    # function falls through and returns None. Remaining lines may exist
    # beyond this view; confirm an explicit error/default branch exists.

Expand Down Expand Up @@ -140,7 +140,7 @@ async def classifier_route(
decision = response.text.strip().lower()

# Process decision and store conversation
result = process_decision(decision, combined_message)
result = await process_decision(decision, combined_message, history)
await zep.add_conversation(session_id, combined_message, str(result))

return ClassifierResponse(result=str(result), session_id=session_id)
Expand Down
69 changes: 41 additions & 28 deletions apps/ai_api/eda_ai_api/utils/prompts.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,46 +14,59 @@
)

# Prompt for topic extraction. The conversation transcript and the latest
# user message precede the instructions; the model must answer with either
# the INSUFFICIENT_CONTEXT sentinel or a comma-separated topic list, which
# extract_topics() parses.
TOPIC_TEMPLATE = PromptTemplate(
    """Previous conversation:
{context}
User message: {message}
Extract up to 5 most relevant topics for grant opportunity research from the user message.
If there isn't enough context to determine specific topics, return "INSUFFICIENT_CONTEXT".
Otherwise, return only a comma-separated list of topics (maximum 5), no other text.
Output:"""
)

# Prompt for proposal-detail extraction. The model must answer in the
# pipe-separated "project_name|grant_name" format that
# extract_proposal_details() splits on, with "unknown" as the placeholder
# for anything it cannot determine.
PROPOSAL_TEMPLATE = PromptTemplate(
    """Previous conversation:
{context}
User message: {message}
Extract the community project name and grant program name from the user message.
Return in format: project_name|grant_name
If either cannot be determined, use "unknown" as placeholder.
Output:"""
)

# Fallback prompts used when extraction yields too little to proceed:
# each asks the user (in their own language) for the missing details.
# Keys match the routing decisions handled in process_decision().
INSUFFICIENT_TEMPLATES = {
    "proposal": PromptTemplate(
        """Previous conversation:
{context}
The user wants to write a proposal but hasn't provided enough information.
Generate a friendly response in the same language as the user's message asking for:
1. The project name and brief description
2. The specific grant program they're applying to (if any)
3. The main objectives of their project
4. The target community or region
User message: {message}
Response:"""
    ),
    "discovery": PromptTemplate(
        """Previous conversation:
{context}
The user wants to find grant opportunities but hasn't provided enough information.
Generate a friendly response in the same language as the user's message asking for:
1. The main topics or areas of their project
2. The target region or community
3. Any specific funding requirements or preferences
User message: {message}
Response:"""
    ),
}

0 comments on commit 4ee6758

Please sign in to comment.