
Commit 41ae29d
Fix eval mode setting in config (#103)
sidnarayanan authored Oct 30, 2024
1 parent ed46be4 commit 41ae29d
Showing 2 changed files with 14 additions and 5 deletions.
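With this change, evaluation_mode moves from HotPotQAEnvState to HotPotQAEnvConfig and is forwarded in get_new_env_by_idx, so the mode configured on the dataset actually reaches each environment it creates. A minimal usage sketch, based on the updated test in this commit:

    from aviary.core import TaskDataset
    from aviary.tools.utils import EvalAnswerMode

    # Pass the answer-evaluation mode through the dataset config.
    dataset = TaskDataset.from_name(
        "hotpotqa",
        split="train",
        evaluation_mode=EvalAnswerMode.EXACT,
    )

    # The mode now propagates into each environment built by the dataset.
    env = dataset.get_new_env_by_idx(0)
    assert env.evaluation_mode == EvalAnswerMode.EXACT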
packages/hotpotqa/src/aviary/envs/hotpotqa/env.py (3 additions, 2 deletions)
@@ -86,8 +86,6 @@ class HotPotQAEnvState(BaseModel):
     )
     page: str | None = Field(default=None, description="The current Wikipedia page.")
 
-    evaluation_mode: EvalAnswerMode = EvalAnswerMode.CONTAINS
-
 
 def create_tool(function: Callable, name: str) -> Tool:
     """Create a Tool object from a function and set its name.
@@ -537,6 +535,8 @@ class HotPotQAEnvConfig(BaseModel):
     )
     proxy: str | None = None
 
+    evaluation_mode: EvalAnswerMode = EvalAnswerMode.CONTAINS
+
 
 class HotPotQADataset(TaskDataset[HotPotQAEnv]):
     # SEE: https://huggingface.co/datasets/hotpotqa/hotpot_qa
@@ -604,6 +604,7 @@ def get_new_env_by_idx(self, idx: int) -> HotPotQAEnv:
             correct_reward=self.config.correct_reward,
             incorrect_reward=self.config.incorrect_reward,
             tool_failure_reward=self.config.tool_failure_reward,
+            evaluation_mode=self.config.evaluation_mode,
             proxy=self.config.proxy,
         )
 
packages/hotpotqa/tests/test_hotpotqa_env.py (11 additions, 3 deletions)
@@ -4,6 +4,7 @@

 from aviary.core import Environment, TaskDataset
 from aviary.envs.hotpotqa import HotPotQAEnv
+from aviary.envs.hotpotqa.env import HotPotQADataset
 from aviary.tools.utils import EvalAnswerMode
 
 
@@ -22,11 +23,18 @@ def test_dataset_from_name() -> None:
     dataset = TaskDataset.from_name("hotpotqa", split="dev")
     assert isinstance(dataset.get_new_env_by_idx(0), HotPotQAEnv)
 
-    # double-check we can load by difficulty level
+    # double-check we can load with various options
     dataset = TaskDataset.from_name(
-        "hotpotqa", split="train", difficulty_level={"easy", "hard"}
+        "hotpotqa",
+        split="train",
+        difficulty_level={"easy", "hard"},
+        evaluation_mode=EvalAnswerMode.EXACT,
     )
+    assert isinstance(dataset, HotPotQADataset)
+    assert len(dataset) == 33633, 'Expected 33633 examples in "train[hard+easy]" split'
+    assert dataset.get_new_env_by_idx(0).evaluation_mode == EvalAnswerMode.EXACT, (
+        "evaluation_mode did not propagate to environment"
+    )
-    assert len(dataset) == 33633
 
     with pytest.raises(ValueError, match="answer"):
         TaskDataset.from_name("hotpotqa", split="test")
