backwards compatibility for tools
richardblythman committed Apr 25, 2024
1 parent 2633d7f commit 3371077
Showing 9 changed files with 35 additions and 14 deletions.
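
What the diff does: the standalone Claude variants of these prediction tools are folded back into the main packages. Each package keeps its legacy "-claude" tool name in ALLOWED_TOOLS, the default model moves from claude-3-sonnet-20240229 to gpt-4-0125-preview, and run() now reads the tool name before building the LLM client so that any tool whose name contains "claude" stays pinned to the Claude model. A minimal sketch of the shim, condensed from the hunks below (the LLM_SETTINGS values, the default fallback, and the return shape are illustrative, not the packages' real code):

    from typing import Any, Dict, Optional, Tuple

    # Illustrative settings table; the real tools define richer per-model entries.
    LLM_SETTINGS: Dict[str, Dict[str, Any]] = {
        "gpt-4-0125-preview": {"default_max_tokens": 500},        # assumed value
        "claude-3-sonnet-20240229": {"default_max_tokens": 500},  # assumed value
    }

    ALLOWED_TOOLS = [
        "prediction-request-rag",
        # LEGACY
        "prediction-request-rag-claude",
    ]

    def run(**kwargs) -> Tuple[Optional[str], int]:
        """Resolve the model before any LLM client is created."""
        tool = kwargs["tool"]
        if tool not in ALLOWED_TOOLS:
            raise ValueError(f"Tool {tool} is not supported.")
        # In the real packages the fallback comes from component.yaml params.
        model = kwargs.get("model", "gpt-4-0125-preview")
        if "claude" in tool:  # maintain backwards compatibility
            model = "claude-3-sonnet-20240229"
        max_tokens = kwargs.get("max_tokens", LLM_SETTINGS[model]["default_max_tokens"])
        return model, max_tokens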
4 changes: 2 additions & 2 deletions packages/napthaai/customs/prediction_request_rag/component.yaml
@@ -7,12 +7,12 @@ license: Apache-2.0
 aea_version: '>=1.0.0, <2.0.0'
 fingerprint:
   __init__.py: bafybeibt7f7crtwvmkg7spy3jhscmlqltvyblzp32g6gj44v7tlo5lycuq
-  prediction_request_rag.py: bafybeibeuxk2znvfrs5cd675jel5zvbtg7rpjzkbv3vjk5xvjrz2vk7qbq
+  prediction_request_rag.py: bafybeicllugnruskdj7ipmrj2vrtlxmjpqtwlk4c3cfjttfzuvkeldp3m4
 fingerprint_ignore_patterns: []
 entry_point: prediction_request_rag.py
 callable: run
 params:
-  default_model: claude-3-sonnet-20240229
+  default_model: gpt-4-0125-preview
 dependencies:
   google-api-python-client:
     version: ==2.95.0
7 changes: 6 additions & 1 deletion packages/napthaai/customs/prediction_request_rag/prediction_request_rag.py
@@ -225,6 +225,9 @@ def embeddings(self, model, input):
 }
 ALLOWED_TOOLS = [
     "prediction-request-rag",
+
+    # LEGACY
+    "prediction-request-rag-claude",
 ]
 ALLOWED_MODELS = list(LLM_SETTINGS.keys())
 DEFAULT_NUM_URLS = defaultdict(lambda: 3)
@@ -673,12 +676,14 @@ def parser_prediction_response(response: str) -> str:
 
 def run(**kwargs) -> Tuple[Optional[str], Any, Optional[Dict[str, Any]], Any]:
     """Run the task"""
+    tool = kwargs["tool"]
     model = kwargs.get("model")
+    if "claude" in tool:  # maintain backwards compatibility
+        model = "claude-3-sonnet-20240229"
     print(f"MODEL: {model}")
     with LLMClientManager(
         kwargs["api_keys"], model, embedding_provider="openai"
     ):
-        tool = kwargs["tool"]
         prompt = extract_question(kwargs["prompt"])
         max_tokens = kwargs.get(
             "max_tokens", LLM_SETTINGS[model]["default_max_tokens"]
4 changes: 2 additions & 2 deletions packages/napthaai/customs/prediction_request_reasoning/component.yaml
@@ -7,12 +7,12 @@ license: Apache-2.0
 aea_version: '>=1.0.0, <2.0.0'
 fingerprint:
   __init__.py: bafybeib36ew6vbztldut5xayk5553rylrq7yv4cpqyhwc5ktvd4cx67vwu
-  prediction_request_reasoning.py: bafybeidhdrx5j475uvezhu3wnso66whuybsj77qq6hrqsreh6lc25ux7qe
+  prediction_request_reasoning.py: bafybeidb43nygtvbhimnsd223ddpoii46dwirb5znmp2g473u4jii36jqa
 fingerprint_ignore_patterns: []
 entry_point: prediction_request_reasoning.py
 callable: run
 params:
-  default_model: claude-3-sonnet-20240229
+  default_model: gpt-4-0125-preview
 dependencies:
   google-api-python-client:
     version: ==2.95.0
7 changes: 6 additions & 1 deletion packages/napthaai/customs/prediction_request_reasoning/prediction_request_reasoning.py
@@ -228,6 +228,9 @@ def embeddings(self, model, input):
 }
 ALLOWED_TOOLS = [
     "prediction-request-reasoning",
+
+    # LEGACY
+    "prediction-request-reasoning-claude",
 ]
 ALLOWED_MODELS = list(LLM_SETTINGS.keys())
 DEFAULT_NUM_URLS = defaultdict(lambda: 3)
@@ -839,12 +842,14 @@ def extract_question(prompt: str) -> str:
 
 def run(**kwargs) -> Tuple[str, Optional[str], Optional[Dict[str, Any]], Any]:
     """Run the task"""
+    tool = kwargs["tool"]
     model = kwargs.get("model")
+    if "claude" in tool:  # maintain backwards compatibility
+        model = "claude-3-sonnet-20240229"
     print(f"MODEL: {model}")
     with LLMClientManager(
         kwargs["api_keys"], model, embedding_provider="openai"
     ):
-        tool = kwargs["tool"]
         prompt = extract_question(kwargs["prompt"])
         max_tokens = kwargs.get(
             "max_tokens", LLM_SETTINGS[model]["default_max_tokens"]
2 changes: 1 addition & 1 deletion packages/napthaai/customs/prediction_url_cot/component.yaml
@@ -7,7 +7,7 @@ license: Apache-2.0
 aea_version: '>=1.0.0, <2.0.0'
 fingerprint:
   __init__.py: bafybeiflni5dkn5fqe7fnu4lgbqxzfrgochhqfbgzwz3vlf5grijp3nkpm
-  prediction_url_cot.py: bafybeiae5r4xpyqaymqa3v5yvyp3xbjy5agjmm73edczw6moqxear45km4
+  prediction_url_cot.py: bafybeihebxfv4xj22nq4mkch6xuddcnu7jv473zec2n5p65oxy63asjudy
 fingerprint_ignore_patterns: []
 entry_point: prediction_url_cot.py
 callable: run
7 changes: 6 additions & 1 deletion packages/napthaai/customs/prediction_url_cot/prediction_url_cot.py
@@ -185,6 +185,9 @@ def embeddings(self, model, input):
 }
 ALLOWED_TOOLS = [
     "prediction-url-cot",
+
+    # LEGACY
+    "prediction-url-cot-claude",
 ]
 ALLOWED_MODELS = list(LLM_SETTINGS.keys())
 NUM_QUERIES = 5
@@ -588,10 +591,12 @@ def parser_prediction_response(response: str) -> str:
 
 def run(**kwargs) -> Tuple[Optional[str], Any, Optional[Dict[str, Any]], Any]:
     """Run the task"""
+    tool = kwargs["tool"]
     model = kwargs.get("model")
+    if "claude" in tool:  # maintain backwards compatibility
+        model = "claude-3-sonnet-20240229"
     print(f"MODEL: {model}")
     with LLMClientManager(kwargs["api_keys"], model):
-        tool = kwargs["tool"]
         prompt = extract_question(kwargs["prompt"])
         max_tokens = kwargs.get(
             "max_tokens", LLM_SETTINGS[model]["default_max_tokens"]
8 changes: 4 additions & 4 deletions packages/packages.json
@@ -5,19 +5,19 @@
         "custom/valory/openai_request/0.1.0": "bafybeihjtddwwkvwzaltk6yhtkk3xxnwnkurdtyuy6ki5tpf7h5htvuxnq",
         "custom/valory/prediction_request_embedding/0.1.0": "bafybeifnz5fzxvzyj3mmjpfsre3nzbdieuyjvnxqxuplopp5taz4qw7ys4",
         "custom/valory/resolve_market/0.1.0": "bafybeiaag2e7rsdr3bwg6mlmfyom4vctsdapohco7z45pxhzjymepz3rya",
-        "custom/valory/prediction_request/0.1.0": "bafybeiboljzvtnfrf3z4zsepwnyys3b53q7lszcmmcoxxu5f72oyuvjkv4",
+        "custom/valory/prediction_request/0.1.0": "bafybeibnshbgciu6inzdjzxeysrwvsin4iitkgd4fkj7a2omjzbdrga2ue",
         "custom/valory/stability_ai_request/0.1.0": "bafybeicyyteycvzj4lk33p4t7mspfarc5d5ktbysu7oqkv6woo4aouxira",
         "custom/polywrap/prediction_with_research_report/0.1.0": "bafybeiewbcbfyjnyqyp4oou6ianxseakblwjyck22bd2doqojjk37uyxwy",
         "custom/jhehemann/prediction_sum_url_content/0.1.0": "bafybeiby55g53cvc4vpbgww5awrlf6x67h7q7pg5xlhwber75ejdkh4twa",
         "custom/psouranis/optimization_by_prompting/0.1.0": "bafybeihb3pyk5qcbj5ib7377p65tznzdsnwilyyhlkcvaj2scmfcpsh6ru",
         "custom/nickcom007/sme_generation_request/0.1.0": "bafybeibqv4ru4lpufy2hvcb3swqhzuq2kejjxmlyepofx6l6mxce6lhiqq",
         "custom/nickcom007/prediction_request_sme/0.1.0": "bafybeigsszaat6k5m5a3ljyem7xdhjflpcm24imtcscgst3tghpwhamglu",
         "custom/napthaai/resolve_market_reasoning/0.1.0": "bafybeiewdqtfkee3od5kuktrhyzexy7466ea3w3to7vv6qnli6qutfrqaa",
-        "custom/napthaai/prediction_request_rag/0.1.0": "bafybeihpvcoy6cg4humgxaqukanjedm32tdcvbpblez4zoycp5vswgpsca",
-        "custom/napthaai/prediction_request_reasoning/0.1.0": "bafybeie5ga4rhno4ua2s5wnsjks2kkgz5ismalsebowii5qnqo74lh2svy",
+        "custom/napthaai/prediction_request_rag/0.1.0": "bafybeif7ufhrlhpuegm6kpiw6jzye6jmp4fjvxgn3hwcv4vkolrrrmidmy",
+        "custom/napthaai/prediction_request_reasoning/0.1.0": "bafybeifzkvc6j5wbbremt2jqig4ozaackzpz3o5okkoihmm3wdpptpviz4",
         "custom/valory/prepare_tx/0.1.0": "bafybeibjqckeb73df724lr4xkrmeh3woqwas4mswa7au65xnwag2edad2e",
         "custom/valory/short_maker/0.1.0": "bafybeif63rt4lkopu3rc3l7sg6tebrrwg2lxqufjx6dx4hoda5yzax43fa",
-        "custom/napthaai/prediction_url_cot/0.1.0": "bafybeid5uwf5i7epbztlmhptbgf2jhsscab5lhyxih7ejpkszrbqtrf6k4",
+        "custom/napthaai/prediction_url_cot/0.1.0": "bafybeic3ch7wfhxqvwgoud7xotuu3khs4xch3ej35kox2gulya2hv65wbu",
         "custom/napthaai/prediction_url_cot_claude/0.1.0": "bafybeicbjywni5hx5ssoiv6tnnjbqzsck6cmtsdpr6m562z6afogz5eh44",
         "custom/napthaai/prediction_request_reasoning_claude/0.1.0": "bafybeihtx2cejxoy42jwk2i5m4evfzz537aic5njuawxnzdzwlo63kdduq",
         "custom/napthaai/prediction_request_rag_claude/0.1.0": "bafybeickr32t7nmapuoymjyo3cf5rr2v2zapksxcivuqsgjr2gn6zo6y7y",
2 changes: 1 addition & 1 deletion packages/valory/customs/prediction_request/component.yaml
@@ -7,7 +7,7 @@ license: Apache-2.0
 aea_version: '>=1.0.0, <2.0.0'
 fingerprint:
   __init__.py: bafybeibbn67pnrrm4qm3n3kbelvbs3v7fjlrjniywmw2vbizarippidtvi
-  prediction_request.py: bafybeihf36nxqee2xgigty2fnd2lkbebpn7elywgfh3dttmxiri6ulqivm
+  prediction_request.py: bafybeigf5k62mxbmcrvjvsnixpbn3hvxlp2l62sk7jtx5vs7fdg5cgtfxe
 fingerprint_ignore_patterns: []
 entry_point: prediction_request.py
 callable: run
8 changes: 7 additions & 1 deletion packages/valory/customs/prediction_request/prediction_request.py
@@ -224,6 +224,10 @@ def count_tokens(text: str, model: str) -> int:
     "prediction-offline",
     "prediction-online",
     # "prediction-online-summarized-info",
+
+    # LEGACY
+    "claude-prediction-offline",
+    "claude-prediction-online",
 ]
 ALLOWED_MODELS = list(LLM_SETTINGS.keys())
 # the default number of URLs to fetch online information for
@@ -658,10 +662,12 @@ def adjust_additional_information(
 
 def run(**kwargs) -> Tuple[str, Optional[str], Optional[Dict[str, Any]], Any]:
     """Run the task"""
+    tool = kwargs["tool"]
     engine = kwargs.get("model")
+    if "claude" in tool:  # maintain backwards compatibility
+        engine = "claude-3-sonnet-20240229"
     print(f"ENGINE: {engine}")
     with LLMClientManager(kwargs["api_keys"], engine):
-        tool = kwargs["tool"]
         prompt = kwargs["prompt"]
         max_tokens = kwargs.get(
             "max_tokens", LLM_SETTINGS[engine]["default_max_tokens"]
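
Taken together, the hunks mean callers can keep sending the old tool names unchanged: only the model selection shifts. Against the sketch after the file list above (illustrative return values, not the packages' real output):

    # Legacy name: the "claude" check pins the old model.
    run(tool="prediction-request-rag-claude")  # -> ("claude-3-sonnet-20240229", 500)

    # Plain name: the new gpt-4-0125-preview default applies.
    run(tool="prediction-request-rag")         # -> ("gpt-4-0125-preview", 500)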
