Commit
linting
aayush3011 committed Feb 20, 2025
1 parent 74899f5 commit 964e53b
Showing 5 changed files with 36 additions and 34 deletions.
6 changes: 3 additions & 3 deletions libs/azure-ai/poetry.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion libs/azure-ai/pyproject.toml
@@ -18,6 +18,7 @@ azure-monitor-opentelemetry = { "version" = "^1.6.4", optional = true }
 opentelemetry-semantic-conventions-ai = { "version" = "^0.4.2", optional = true }
 opentelemetry-instrumentation-threading = { "version" = "^0.49b2", optional = true }
 numpy = "^1.24"
+pymongo = "^4.5.0"

 [tool.poetry.extras]
 opentelemetry = ["azure-monitor-opentelemetry", "opentelemetry-semantic-conventions-ai", "opentelemetry-instrumentation-threading"]
@@ -42,7 +43,6 @@ pytest-asyncio = "^0.21.1"
 python-dotenv = "^1.0.1"
 syrupy = "^4.7.2"
 langchain-core = {git = "https://github.com/langchain-ai/langchain.git", subdirectory = "libs/core"}
-pymongo = "^4.5.0"
 numpy = ">=1.21"

 [tool.poetry.group.test_integration.dependencies]
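This change moves pymongo out of the dev-dependency group and into the package's main dependencies, so it is installed for every user of langchain-azure-ai rather than only for the test suite. That matters because the Cosmos DB Mongo vCore vector store and semantic cache use pymongo at runtime, not just under test. A minimal sketch of the runtime path this enables, with placeholder connection details that are assumptions rather than values from this commit:

import os
from pymongo import MongoClient

# Placeholder connection string and names, for illustration only.
client = MongoClient(os.environ.get("COSMOS_MONGO_VCORE_CONNECTION_STRING", "mongodb://localhost:27017"))
collection = client["test_db"]["test_collection"]  # the vector store and semantic cache operate on a collection like this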
@@ -14,9 +14,9 @@
 import pytest
 from langchain_core.globals import get_llm_cache, set_llm_cache
 from langchain_core.outputs import Generation
-from langchain_openai.embeddings import OpenAIEmbeddings

 from langchain_azure_ai.chat_models import AzureAIChatCompletionsModel
+from langchain_azure_ai.embeddings import AzureAIEmbeddingsModel
 from langchain_azure_ai.vectorstores.azure_cosmos_db_mongo_vcore import (
     CosmosDBSimilarityType,
     CosmosDBVectorSearchType,
@@ -50,19 +50,20 @@ def random_string() -> str:

 @pytest.fixture()
 def azure_openai_embeddings() -> Any:
-    openai_embeddings: OpenAIEmbeddings = OpenAIEmbeddings(
-        model=model_name,
-        chunk_size=1,
+    azure_openai_embeddings: AzureAIEmbeddingsModel = AzureAIEmbeddingsModel(
+        endpoint="",
+        credential="",
+        model_name="",
     )
-    return openai_embeddings
+    return azure_openai_embeddings


 @pytest.mark.requires("pymongo")
 @pytest.mark.skipif(
     not _has_env_vars(), reason="Missing Azure CosmosDB Mongo vCore env. vars"
 )
 def test_azure_cosmos_db_semantic_cache(
-    azure_openai_embeddings: OpenAIEmbeddings,
+    azure_openai_embeddings: AzureAIEmbeddingsModel,
 ) -> None:
     set_llm_cache(
         AzureCosmosDBMongoVCoreSemanticCache(
@@ -101,7 +102,7 @@ def test_azure_cosmos_db_semantic_cache(
     not _has_env_vars(), reason="Missing Azure CosmosDB Mongo vCore env. vars"
 )
 def test_azure_cosmos_db_semantic_cache_inner_product(
-    azure_openai_embeddings: OpenAIEmbeddings,
+    azure_openai_embeddings: AzureAIEmbeddingsModel,
 ) -> None:
     set_llm_cache(
         AzureCosmosDBMongoVCoreSemanticCache(
@@ -140,7 +141,7 @@ def test_azure_cosmos_db_semantic_cache_inner_product(
     not _has_env_vars(), reason="Missing Azure CosmosDB Mongo vCore env. vars"
 )
 def test_azure_cosmos_db_semantic_cache_multi(
-    azure_openai_embeddings: OpenAIEmbeddings,
+    azure_openai_embeddings: AzureAIEmbeddingsModel,
 ) -> None:
     set_llm_cache(
         AzureCosmosDBMongoVCoreSemanticCache(
@@ -181,7 +182,7 @@ def test_azure_cosmos_db_semantic_cache_multi(
     not _has_env_vars(), reason="Missing Azure CosmosDB Mongo vCore env. vars"
 )
 def test_azure_cosmos_db_semantic_cache_multi_inner_product(
-    azure_openai_embeddings: OpenAIEmbeddings,
+    azure_openai_embeddings: AzureAIEmbeddingsModel,
 ) -> None:
     set_llm_cache(
         AzureCosmosDBMongoVCoreSemanticCache(
@@ -222,7 +223,7 @@ def test_azure_cosmos_db_semantic_cache_multi_inner_product(
     not _has_env_vars(), reason="Missing Azure CosmosDB Mongo vCore env. vars"
 )
 def test_azure_cosmos_db_semantic_cache_hnsw(
-    azure_openai_embeddings: OpenAIEmbeddings,
+    azure_openai_embeddings: AzureAIEmbeddingsModel,
 ) -> None:
     set_llm_cache(
         AzureCosmosDBMongoVCoreSemanticCache(
@@ -261,7 +262,7 @@ def test_azure_cosmos_db_semantic_cache_hnsw(
     not _has_env_vars(), reason="Missing Azure CosmosDB Mongo vCore env. vars"
 )
 def test_azure_cosmos_db_semantic_cache_inner_product_hnsw(
-    azure_openai_embeddings: OpenAIEmbeddings,
+    azure_openai_embeddings: AzureAIEmbeddingsModel,
 ) -> None:
     set_llm_cache(
         AzureCosmosDBMongoVCoreSemanticCache(
@@ -300,7 +301,7 @@ def test_azure_cosmos_db_semantic_cache_inner_product_hnsw(
     not _has_env_vars(), reason="Missing Azure CosmosDB Mongo vCore env. vars"
 )
 def test_azure_cosmos_db_semantic_cache_multi_hnsw(
-    azure_openai_embeddings: OpenAIEmbeddings,
+    azure_openai_embeddings: AzureAIEmbeddingsModel,
 ) -> None:
     set_llm_cache(
         AzureCosmosDBMongoVCoreSemanticCache(
@@ -341,7 +342,7 @@ def test_azure_cosmos_db_semantic_cache_multi_hnsw(
     not _has_env_vars(), reason="Missing Azure CosmosDB Mongo vCore env. vars"
 )
 def test_azure_cosmos_db_semantic_cache_multi_inner_product_hnsw(
-    azure_openai_embeddings: OpenAIEmbeddings,
+    azure_openai_embeddings: AzureAIEmbeddingsModel,
 ) -> None:
     set_llm_cache(
         AzureCosmosDBMongoVCoreSemanticCache(
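Every semantic-cache test in this module now types its fixture as AzureAIEmbeddingsModel instead of OpenAIEmbeddings, and the fixture constructs the model with empty strings as placeholders. A sketch of how those placeholders might be filled in for a real run: the keyword arguments endpoint, credential, and model_name come from the diff above, while the environment-variable names are hypothetical.

import os
from langchain_azure_ai.embeddings import AzureAIEmbeddingsModel

# Hypothetical environment-variable names, for illustration only.
embeddings = AzureAIEmbeddingsModel(
    endpoint=os.environ.get("AZURE_INFERENCE_ENDPOINT", ""),
    credential=os.environ.get("AZURE_INFERENCE_CREDENTIAL", ""),
    model_name=os.environ.get("AZURE_INFERENCE_MODEL_NAME", ""),
)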
@@ -6,9 +6,9 @@
 import pytest
 from langchain_core.globals import get_llm_cache, set_llm_cache
 from langchain_core.outputs import Generation
-from langchain_openai.embeddings import OpenAIEmbeddings

 from langchain_azure_ai.chat_models import AzureAIChatCompletionsModel
+from langchain_azure_ai.embeddings import AzureAIEmbeddingsModel
 from langchain_azure_ai.vectorstores.cache import AzureCosmosDBNoSqlSemanticCache

 HOST = "COSMOS_DB_URI"
@@ -31,12 +31,13 @@ def partition_key() -> Any:


 @pytest.fixture()
-def azure_openai_embeddings() -> OpenAIEmbeddings:
-    openai_embeddings: OpenAIEmbeddings = OpenAIEmbeddings(
-        model=model_name,
-        chunk_size=1,
+def azure_openai_embeddings() -> Any:
+    azure_openai_embeddings: AzureAIEmbeddingsModel = AzureAIEmbeddingsModel(
+        endpoint="",
+        credential="",
+        model_name="",
     )
-    return openai_embeddings
+    return azure_openai_embeddings


 # cosine, euclidean, innerproduct
@@ -68,7 +69,7 @@ def vector_embedding_policy(distance_function: str) -> dict:

 def test_azure_cosmos_db_nosql_semantic_cache_cosine_quantizedflat(
     cosmos_client: Any,
-    azure_openai_embeddings: OpenAIEmbeddings,
+    azure_openai_embeddings: AzureAIEmbeddingsModel,
 ) -> None:
     set_llm_cache(
         AzureCosmosDBNoSqlSemanticCache(
@@ -99,7 +100,7 @@ def test_azure_cosmos_db_nosql_semantic_cache_cosine_quantizedflat(

 def test_azure_cosmos_db_nosql_semantic_cache_cosine_flat(
     cosmos_client: Any,
-    azure_openai_embeddings: OpenAIEmbeddings,
+    azure_openai_embeddings: AzureAIEmbeddingsModel,
 ) -> None:
     set_llm_cache(
         AzureCosmosDBNoSqlSemanticCache(
@@ -130,7 +131,7 @@ def test_azure_cosmos_db_nosql_semantic_cache_cosine_flat(

 def test_azure_cosmos_db_nosql_semantic_cache_dotproduct_quantizedflat(
     cosmos_client: Any,
-    azure_openai_embeddings: OpenAIEmbeddings,
+    azure_openai_embeddings: AzureAIEmbeddingsModel,
 ) -> None:
     set_llm_cache(
         AzureCosmosDBNoSqlSemanticCache(
@@ -163,7 +164,7 @@ def test_azure_cosmos_db_nosql_semantic_cache_dotproduct_quantizedflat(

 def test_azure_cosmos_db_nosql_semantic_cache_dotproduct_flat(
     cosmos_client: Any,
-    azure_openai_embeddings: OpenAIEmbeddings,
+    azure_openai_embeddings: AzureAIEmbeddingsModel,
 ) -> None:
     set_llm_cache(
         AzureCosmosDBNoSqlSemanticCache(
@@ -196,7 +197,7 @@ def test_azure_cosmos_db_nosql_semantic_cache_dotproduct_flat(

 def test_azure_cosmos_db_nosql_semantic_cache_euclidean_quantizedflat(
     cosmos_client: Any,
-    azure_openai_embeddings: OpenAIEmbeddings,
+    azure_openai_embeddings: AzureAIEmbeddingsModel,
 ) -> None:
     set_llm_cache(
         AzureCosmosDBNoSqlSemanticCache(
@@ -227,7 +228,7 @@ def test_azure_cosmos_db_nosql_semantic_cache_euclidean_quantizedflat(

 def test_azure_cosmos_db_nosql_semantic_cache_euclidean_flat(
     cosmos_client: Any,
-    azure_openai_embeddings: OpenAIEmbeddings,
+    azure_openai_embeddings: AzureAIEmbeddingsModel,
 ) -> None:
     set_llm_cache(
         AzureCosmosDBNoSqlSemanticCache(
@@ -7,7 +7,6 @@

 import pytest
 from langchain_core.documents import Document
-from langchain_openai.embeddings import OpenAIEmbeddings

 from langchain_azure_ai.embeddings import AzureAIEmbeddingsModel
 from langchain_azure_ai.vectorstores.azure_cosmos_db_mongo_vcore import (
@@ -58,11 +57,12 @@ def collection() -> Any:

 @pytest.fixture()
 def azure_openai_embeddings() -> Any:
-    openai_embeddings: OpenAIEmbeddings = OpenAIEmbeddings(
-        model=model_name,
-        chunk_size=1,
+    azure_openai_embeddings: AzureAIEmbeddingsModel = AzureAIEmbeddingsModel(
+        endpoint="",
+        credential="",
+        model_name="",
     )
-    return openai_embeddings
+    return azure_openai_embeddings


 """
