Skip to content

Commit

Permalink
Added a test to verify the successful query response analysis
Browse files Browse the repository at this point in the history
  • Loading branch information
bgodlin committed Jun 25, 2024
1 parent 4e2c002 commit 41a5397
Show file tree
Hide file tree
Showing 5 changed files with 42 additions and 5 deletions.
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
name: graphql-response-analyser
name: graphql_response_analyser
author: subquery
version: 0.1.0
type: custom
description: This module explains the output of the GraphQL server within a given context, including a description of the indexed data being served and the query itself.
license: Apache-2.0
aea_version: '>=1.0.0, <2.0.0'
entry_point: graphql-response-analyser.py
entry_point: graphql_response_analyser.py
callable: run
dependencies:
openai:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@
"""This module accepts a GraphQL endpoint, executes queries based on a given description, and explains the response in natural language"""

from typing import Any, Dict, Optional, Tuple
import os
from openai import OpenAI
import json
import requests
Expand All @@ -32,7 +31,7 @@
def analyse_data_and_generate_response(description, query, data):
return f"""
You're a GraphQL query response analyzer. You will be provided with context about the data served by the endpoint, as well as the executed query, to give you a better understanding. Based on this, you are expected to return a short bullet point description of the response in natural language.
You're a GraphQL query response analyzer. You will be provided with context about the data served by the endpoint, as well as the executed query, to give you a better understanding.
Description:
Expand All @@ -46,6 +45,8 @@ def analyse_data_and_generate_response(description, query, data):
{json.dumps(data)}
Based on the provided context, please generate a bullet-pointed summary in a machine-readable JSON format. The JSON structure should have an array object named 'analysis_result,' with each analytical conclusion represented as a separate string element within the array.
"""


Expand Down
36 changes: 36 additions & 0 deletions tests/test_tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,9 @@
from packages.valory.customs.prediction_request import prediction_request
from packages.valory.skills.task_execution.utils.apis import KeyChain
from packages.valory.skills.task_execution.utils.benchmarks import TokenCounterCallback

from packages.subquery.customs.graphql_response_analyser import graphql_response_analyser

from tests.constants import (
OPENAI_SECRET_KEY,
STABILITY_API_KEY,
Expand Down Expand Up @@ -175,3 +178,36 @@ def _validate_response(self, response: Any) -> None:
super()._validate_response(response)
expected_num_tx_params = 2
assert len(response[2].keys()) == expected_num_tx_params

# GraphQL aggregation query used as a fixture for the analyser test below:
# sums transfer values grouped by key via SubQuery's aggregates feature.
test_query = """
{
transfers {
aggregates {
keys
sum {
value
}
}
}
}
"""


class TestGraphResponseAnalyser:
    """Check successful query output analysis."""

    # Name of the callable exposed by the tool module, resolved via getattr.
    tool_callable: str = "run"
    # Custom mech tool under test (package: subquery/customs).
    tool_module = graphql_response_analyser

    def test_run(self) -> None:
        """Run the tool end-to-end against a live endpoint and verify the analysis payload.

        NOTE(review): this is an integration test — it performs real network
        calls to the SubQuery endpoint and the OpenAI API, so it requires a
        valid OPENAI_SECRET_KEY and network access.
        """
        kwargs = dict(
            tool="openai-gpt-3.5-turbo",
            request="When was the first transfer?",
            query=test_query,
            endpoint="https://api.subquery.network/sq/subquery/cusdnew__c3Vic",
            # NOTE(review): description says the project indexes cUSD but later
            # mentions "USDT tokens" — confirm the intended wording upstream.
            description="This project manages and indexes data pertaining to cUSD (CELO USD) ERC-20 token transfers and approvals recorded within a dedicated smart contract. The stored data includes information on approvals granted and transfers executed. These entities provide insights into the authorization and movement of USDT tokens within the CELO ecosystem, facilitating analysis and monitoring of token transactions.",
            api_keys={"openai": OPENAI_SECRET_KEY},
        )
        func = getattr(self.tool_module, self.tool_callable)
        response = func(**kwargs)
        # The tool returns a tuple; its first element is the textual/JSON
        # result, expected to carry an 'analysis_result' array.
        assert "analysis_result" in response[0]
2 changes: 1 addition & 1 deletion tox.ini
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ deps =
{[deps-tests]deps}
open-autonomy==0.14.10
fastapi==0.110.3
openai==0.27.2
openai==1.11.0
requests==2.28.1
mech-client==0.2.5
py-multibase==1.0.3
Expand Down

0 comments on commit 41a5397

Please sign in to comment.