Skip to content

Commit

Permalink
Remove xfail on some more tests
Browse files Browse the repository at this point in the history
  • Loading branch information
henrytwo committed Sep 26, 2024
1 parent 0570d36 commit a0f4ad1
Show file tree
Hide file tree
Showing 2 changed files with 2 additions and 55 deletions.
41 changes: 0 additions & 41 deletions libs/cerebras/langchain_cerebras/chat_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,13 +3,7 @@
from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, cast

Check failure on line 3 in libs/cerebras/langchain_cerebras/chat_models.py

View workflow job for this annotation

GitHub Actions / cd libs/cerebras / make lint #3.11

Ruff (F401)

langchain_cerebras/chat_models.py:3:25: F401 `typing.AsyncIterator` imported but unused

Check failure on line 3 in libs/cerebras/langchain_cerebras/chat_models.py

View workflow job for this annotation

GitHub Actions / cd libs/cerebras / make lint #3.11

Ruff (F401)

langchain_cerebras/chat_models.py:3:46: F401 `typing.Iterator` imported but unused

Check failure on line 3 in libs/cerebras/langchain_cerebras/chat_models.py

View workflow job for this annotation

GitHub Actions / cd libs/cerebras / make lint #3.11

Ruff (F401)

langchain_cerebras/chat_models.py:3:72: F401 `typing.cast` imported but unused

import openai
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import LangSmithParams
from langchain_core.messages import BaseMessage
from langchain_core.outputs import ChatGenerationChunk
from langchain_core.utils import (
from_env,
secret_from_env,
Expand Down Expand Up @@ -379,38 +373,3 @@ def validate_environment(self) -> Self:
)
self.async_client = self.root_async_client.chat.completions
return self

# Workaround: tool calling is not supported by the upstream streaming path,
# so tool-bearing requests fall back to a single non-streamed generation.
def _stream(
    self,
    messages: List[BaseMessage],
    stop: Optional[List[str]] = None,
    run_manager: Optional[CallbackManagerForLLMRun] = None,
    **kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
    """Stream chat completions, degrading to one-shot generation when tools are bound.

    When ``tools`` is present in ``kwargs`` the request is routed through the
    blocking ``_generate`` call and its first generation is yielded as a single
    "chunk"; otherwise streaming is delegated to the parent implementation.

    NOTE(review): the ``cast`` below coerces a ``ChatGeneration`` into a
    ``ChatGenerationChunk`` without converting it — callers that rely on
    chunk-specific fields may misbehave; confirm against the base class.
    """
    if not kwargs.get("tools"):
        # No tools bound: true streaming via the parent class.
        yield from super()._stream(messages, stop, run_manager, **kwargs)
        return
    # Tools bound: perform a full generation and emit it as one chunk.
    result = super()._generate(messages, stop, run_manager, **kwargs)
    yield cast(ChatGenerationChunk, result.generations[0])

async def _astream(
    self,
    messages: List[BaseMessage],
    stop: Optional[List[str]] = None,
    run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
    **kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
    """Async variant of ``_stream`` with the same tool-calling workaround.

    Without ``tools`` in ``kwargs``, chunks are forwarded from the parent
    ``_astream``. With tools bound, a single ``_agenerate`` call is awaited and
    its first generation is yielded as one chunk.

    NOTE(review): as in ``_stream``, the ``cast`` does not convert the
    generation into a real chunk — verify downstream consumers tolerate this.
    """
    if not kwargs.get("tools"):
        # No tools bound: forward the parent's async stream unchanged.
        async for chunk in super()._astream(messages, stop, run_manager, **kwargs):
            yield chunk
        return
    # Tools bound: one-shot async generation emitted as a single chunk.
    result = await super()._agenerate(messages, stop, run_manager, **kwargs)
    yield cast(ChatGenerationChunk, result.generations[0])
16 changes: 2 additions & 14 deletions libs/cerebras/tests/integration_tests/test_base_standard.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,20 +16,8 @@ def chat_model_class(self) -> Type[BaseChatModel]:

@property
def chat_model_params(self) -> dict:
return {"model": "llama3.1-8b"}
return {"model": "llama3.1-70b"}

@pytest.mark.xfail(reason=("Not supported"))
def test_structured_output(self, model: BaseChatModel) -> None:
super().test_structured_output(model)

@pytest.mark.xfail(reason=("Not supported"))
def test_structured_output_pydantic_2_v1(self, model: BaseChatModel) -> None:
super().test_structured_output_pydantic_2_v1(model)

@pytest.mark.xfail(reason=("Not supported"))
def test_tool_calling_with_no_arguments(self, model: BaseChatModel) -> None:
super().test_tool_calling_with_no_arguments(model)

@pytest.mark.xfail(reason=("Not supported"))
# Expected failure: the provider rejects list-shaped (array) message content.
@pytest.mark.xfail(reason="Array input not supported")
def test_tool_message_histories_list_content(self, model: BaseChatModel) -> None:
    """Run the standard-tests case for list-content tool histories (known unsupported)."""
    super().test_tool_message_histories_list_content(model)

0 comments on commit a0f4ad1

Please sign in to comment.