Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: add loop component 🎁🎄 #5429

Merged
merged 28 commits into from
Jan 17, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
28 commits
Select commit Hold shift + click to select a range
1684b30
add loop component 🎁🎄
rodrigosnader Dec 24, 2024
5afbc4d
[autofix.ci] apply automated fixes
autofix-ci[bot] Dec 24, 2024
c66e6ce
fix: add loop component to init
rodrigosnader Dec 26, 2024
3ea787f
[autofix.ci] apply automated fixes
autofix-ci[bot] Dec 26, 2024
85d2381
refactor(loop): rename loop input variable and improve code quality
ogabrielluiz Jan 6, 2025
2385623
refactor(loop): add type hint to initialize_data method for improved …
ogabrielluiz Jan 6, 2025
8c8242d
Merge branch 'main' into loop-component
ogabrielluiz Jan 13, 2025
cc080fd
fix: mypy error incompatible return value type
italojohnny Jan 14, 2025
8ebe580
feat: adds test cases for loop component compatibility with the APIs,…
edwinjosechittilappilly Jan 14, 2025
8b9d289
Merge branch 'main' into loop-component
edwinjosechittilappilly Jan 14, 2025
0317d83
Merge branch 'main' into loop-component
edwinjosechittilappilly Jan 15, 2025
36fcde5
Merge branch 'main' into loop-component
edwinjosechittilappilly Jan 15, 2025
303779e
feat: improve model input fields for Cohere component (#5712)
viniciossilva3 Jan 16, 2025
b55c9f1
refactor: improve naming consistency in DataCombiner component (#5471)
raphaelchristi Jan 16, 2025
d89f8e8
refactor: Refactor Wikipedia API component (#5432)
raphaelchristi Jan 16, 2025
c39bb39
fix: pass slider input values correctly, add test (#5735)
Cristhianzl Jan 16, 2025
39ef9ba
feat: make AWS credentials required in bedrock component (#5710)
viniciossilva3 Jan 16, 2025
90f570e
chore: update test durations (#5736)
github-actions[bot] Jan 16, 2025
99f2ef6
feat: add truncation to ResultDataResponse (#5704)
ogabrielluiz Jan 16, 2025
778b74d
feat: Add function to validate models with tool calling function and …
edwinjosechittilappilly Jan 16, 2025
2acd434
feat: assistants agent improvements (#5581)
phact Jan 16, 2025
e1b5c70
refactor: enhance flow type safety and clean up unused code (#5669)
Cristhianzl Jan 16, 2025
c51e57c
feat: Add `required=True` to essential inputs across Langflow compone…
Vigtu Jan 16, 2025
47dc891
feat: make YouTube Transcripts URL field required (#5686)
viniciossilva3 Jan 16, 2025
a5f5f3e
fix: Fix memory leak when creating components (#5733)
cbornet Jan 16, 2025
36b3289
test: Update API key requirements and test configurations for fronten…
Cristhianzl Jan 17, 2025
0cba960
Merge branch 'main' into loop-component
lucaseduoli Jan 17, 2025
210de02
Merge branch 'feat/loop_ui' into loop-component
edwinjosechittilappilly Jan 17, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ dependencies = [
"langsmith==0.1.147",
"yfinance==0.2.50",
"wolframalpha==5.1.3",
"astra-assistants[tools]~=2.2.6",
"astra-assistants[tools]~=2.2.9",
"composio-langchain==0.6.13",
"composio-core==0.6.13",
"spider-client==0.1.24",
Expand Down Expand Up @@ -186,6 +186,7 @@ dev-dependencies = [
"blockbuster>=1.5.8,<1.6",
"types-aiofiles>=24.1.0.20240626",
"codeflash>=0.8.4",
"hypothesis>=6.123.17",
]


Expand Down
1 change: 1 addition & 0 deletions src/backend/base/langflow/alembic/env.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
# noqa: INP001
from logging.config import fileConfig

from alembic import context
Expand Down
72 changes: 61 additions & 11 deletions src/backend/base/langflow/api/v1/schemas.py
Original file line number Diff line number Diff line change
@@ -1,20 +1,13 @@
from datetime import datetime, timezone
from decimal import Decimal
from enum import Enum
from pathlib import Path
from typing import Any
from uuid import UUID

from pydantic import (
BaseModel,
ConfigDict,
Field,
field_serializer,
field_validator,
model_serializer,
)
from pydantic import BaseModel, ConfigDict, Field, field_serializer, field_validator, model_serializer

from langflow.graph.schema import RunOutputs
from langflow.graph.utils import serialize_field
from langflow.schema import dotdict
from langflow.schema.graph import Tweaks
from langflow.schema.schema import InputType, OutputType, OutputValue
Expand All @@ -24,6 +17,7 @@
from langflow.services.database.models.user import UserRead
from langflow.services.settings.feature_flags import FeatureFlags
from langflow.services.tracing.schema import Log
from langflow.utils.constants import MAX_TEXT_LENGTH
from langflow.utils.util_strings import truncate_long_strings


Expand Down Expand Up @@ -275,9 +269,65 @@ class ResultDataResponse(BaseModel):
@field_serializer("results")
@classmethod
def serialize_results(cls, v):
    """Serialize results with custom handling for special types and truncation."""
    if not isinstance(v, dict):
        return cls._serialize_and_truncate(v, max_length=MAX_TEXT_LENGTH)
    # Truncate every entry of a results mapping individually.
    return {key: cls._serialize_and_truncate(value, max_length=MAX_TEXT_LENGTH) for key, value in v.items()}

@staticmethod
def _serialize_and_truncate(obj: Any, max_length: int = MAX_TEXT_LENGTH) -> Any:
    """Recursively serialize *obj* into JSON-friendly values, truncating long text.

    Strings (and bytes, decoded as UTF-8 with undecodable bytes dropped) longer
    than ``max_length`` are cut and suffixed with ``"... [truncated]"``.
    Datetimes become timezone-aware ISO strings, Decimals become floats, UUIDs
    become strings, and pydantic models / dicts / sequences are processed
    recursively.  Anything else is returned unchanged.
    """
    recurse = ResultDataResponse._serialize_and_truncate
    if isinstance(obj, bytes):
        # Decode first, then fall through to the single string-truncation path
        # below (previously this branch duplicated the truncation logic).
        obj = obj.decode("utf-8", errors="ignore")
    if isinstance(obj, str):
        return f"{obj[:max_length]}... [truncated]" if len(obj) > max_length else obj
    if isinstance(obj, datetime):
        return obj.astimezone().isoformat()
    if isinstance(obj, Decimal):
        return float(obj)
    if isinstance(obj, UUID):
        return str(obj)
    if isinstance(obj, OutputValue | Log | BaseModel):
        # Serialize the model, then recursively truncate every field.  The
        # recursive call already truncates string fields, so the per-key string
        # special case the original OutputValue/Log branch carried was redundant
        # and has been folded into the shared path.
        return {k: recurse(v, max_length=max_length) for k, v in obj.model_dump().items()}
    if isinstance(obj, dict):
        return {k: recurse(v, max_length=max_length) for k, v in obj.items()}
    if isinstance(obj, list | tuple):
        # Tuples intentionally come back as lists: the output must be JSON-serializable.
        return [recurse(item, max_length=max_length) for item in obj]
    return obj

@model_serializer(mode="plain")
def serialize_model(self) -> dict:
    """Custom serializer for the entire model."""
    # Fields that get the generic serialize-and-truncate treatment.
    truncated = {
        field: self._serialize_and_truncate(getattr(self, field), max_length=MAX_TEXT_LENGTH)
        for field in ("outputs", "logs", "message", "artifacts")
    }
    return {
        "results": self.serialize_results(self.results),
        **truncated,
        # Timing/bookkeeping fields pass through untouched.
        "timedelta": self.timedelta,
        "duration": self.duration,
        "used_frozen_result": self.used_frozen_result,
    }


class VertexBuildResponse(BaseModel):
Expand Down
98 changes: 98 additions & 0 deletions src/backend/base/langflow/base/astra_assistants/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,14 +4,20 @@
import os
import pkgutil
import threading
import uuid
from json.decoder import JSONDecodeError
from pathlib import Path
from typing import Any

import astra_assistants.tools as astra_assistants_tools
import requests
from astra_assistants import OpenAIWithDefaultKey, patch
from astra_assistants.tools.tool_interface import ToolInterface
from langchain_core.tools import BaseTool
from pydantic import BaseModel
from requests.exceptions import RequestException

from langflow.components.tools.mcp_stdio import create_input_schema_from_json_schema
from langflow.services.cache.utils import CacheMiss

client_lock = threading.Lock()
Expand Down Expand Up @@ -64,3 +70,95 @@ def tools_from_package(your_package) -> None:


tools_from_package(astra_assistants_tools)


def wrap_base_tool_as_tool_interface(base_tool: BaseTool) -> ToolInterface:
    """Wrap a BaseTool instance in a new class implementing ToolInterface.

    Builds a dynamic Pydantic model from the tool's ``args_schema`` (if any).
    ``args_schema()`` is only called if it's truly a function/method, avoiding
    accidental calls on a Pydantic model class (which is also callable).

    Returns an instance of a dynamically created ``ToolInterface`` subclass
    that delegates to *base_tool* via composition.
    """
    raw_args_schema = getattr(base_tool, "args_schema", None)

    # --- 1) Distinguish between a function/method vs. class/dict/None ---
    if inspect.isfunction(raw_args_schema) or inspect.ismethod(raw_args_schema):
        # It's actually a function -> call it once to get a class or dict
        raw_args_schema = raw_args_schema()
    # Otherwise, if it's a class or dict, do nothing here

    # Now `raw_args_schema` might be:
    # - A Pydantic model class (subclass of BaseModel)
    # - A dict (JSON schema)
    # - None
    # - Something unexpected => raise error

    # --- 2) Convert the schema or model class to a JSON schema dict ---
    if raw_args_schema is None:
        # No schema => minimal "accepts anything" object schema
        schema_dict = {"type": "object", "properties": {}}

    elif isinstance(raw_args_schema, dict):
        # Already a JSON schema
        schema_dict = raw_args_schema

    elif inspect.isclass(raw_args_schema) and issubclass(raw_args_schema, BaseModel):
        # It's a Pydantic model class -> convert to JSON schema
        schema_dict = raw_args_schema.schema()

    else:
        msg = f"args_schema must be a Pydantic model class, a JSON schema dict, or None. Got: {raw_args_schema!r}"
        raise TypeError(msg)

    # --- 3) Build our dynamic Pydantic model from the JSON schema ---
    InputSchema: type[BaseModel] = create_input_schema_from_json_schema(schema_dict)  # noqa: N806

    # --- 4) Define a wrapper class that uses composition ---
    class WrappedDynamicTool(ToolInterface):
        """WrappedDynamicTool.

        Uses composition to delegate logic to the original base_tool,
        but sets `call(..., arguments: InputSchema)` so we have a real model.
        """

        def __init__(self, tool: BaseTool):
            self._tool = tool

        def call(self, arguments: InputSchema) -> dict:  # type: ignore # noqa: PGH003
            # NOTE(review): assumes invoke() returns a sequence whose first
            # element exposes a `.data` dict carrying "error"/"result" keys —
            # confirm against the tool implementations this wraps.
            output = self._tool.invoke(arguments.dict())  # type: ignore # noqa: PGH003
            result = ""
            if "error" in output[0].data:
                result = output[0].data["error"]
            elif "result" in output[0].data:
                result = output[0].data["result"]
            # Fresh cache_id per call so each invocation is individually addressable.
            return {"cache_id": str(uuid.uuid4()), "output": result}

        def run(self, tool_input: Any) -> str:
            # Straight pass-through to the wrapped tool's run().
            return self._tool.run(tool_input)

        def name(self) -> str:
            """Return the base tool's name if it exists, else the interface default."""
            if hasattr(self._tool, "name"):
                return str(self._tool.name)
            return super().name()

        def to_function(self):
            """Build an OpenAI-style function spec, incorporating the base tool's description if present."""
            params = InputSchema.schema()
            description = getattr(self._tool, "description", "A dynamically wrapped tool")
            return {
                "type": "function",
                "function": {"name": self.name(), "description": description, "parameters": params},
            }

    # Return an instance of our newly minted class
    return WrappedDynamicTool(base_tool)


def sync_upload(file_path, client):
    """Upload the file at *file_path* to the assistants file store.

    Opens the file synchronously in binary mode, since the client expects a
    real (blocking) file handle, and returns the client's create() response.
    """
    with Path(file_path).open("rb") as handle:
        return client.files.create(
            file=handle,
            purpose="assistants",
        )
15 changes: 15 additions & 0 deletions src/backend/base/langflow/base/models/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
import warnings
from abc import abstractmethod

from langchain_core.language_models import BaseChatModel
from langchain_core.language_models.llms import LLM
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage
from langchain_core.output_parsers import BaseOutputParser
Expand Down Expand Up @@ -43,6 +44,20 @@ class LCModelComponent(Component):
def _get_exception_message(self, e: Exception):
return str(e)

def supports_tool_calling(self, model: LanguageModel) -> bool:
    """Return True if *model* appears to support tool calling.

    A model counts as tool-capable when its class overrides
    ``BaseChatModel.bind_tools`` and binding a trivial tool succeeds.
    Any binding failure is treated as "not supported".
    """
    try:
        # Bug fix: compare on the class, not the instance. `model.bind_tools`
        # is a bound-method object created on each attribute access, so an
        # `is` check against the plain class function was always False and the
        # override guard never fired. `type(model).bind_tools` yields the
        # underlying function, which IS identical when not overridden.
        if type(model).bind_tools is BaseChatModel.bind_tools:
            return False

        def test_tool(x: int) -> int:
            return x

        model_with_tool = model.bind_tools([test_tool])
        # NOTE(review): assumes the bound runnable exposes a non-empty `tools`
        # attribute — confirm against the langchain-core version in use.
        return hasattr(model_with_tool, "tools") and len(model_with_tool.tools) > 0
    except (AttributeError, TypeError, ValueError):
        return False

def _validate_outputs(self) -> None:
# At least these two outputs must be defined
required_output_methods = ["text_response", "build_model"]
Expand Down
10 changes: 9 additions & 1 deletion src/backend/base/langflow/base/models/model_input_constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ def process_inputs(component_data):
if isinstance(component_data, SecretStrInput):
component_data.value = ""
component_data.load_from_db = False
elif component_data.name == "temperature":
elif component_data.name in {"temperature", "tool_model_enabled", "base_url"}:
component_data = set_advanced_true(component_data)
return component_data

Expand Down Expand Up @@ -180,3 +180,11 @@ def _get_amazon_bedrock_inputs_and_fields():

MODEL_PROVIDERS = list(MODEL_PROVIDERS_DICT.keys())
ALL_PROVIDER_FIELDS: list[str] = [field for provider in MODEL_PROVIDERS_DICT.values() for field in provider["fields"]]

MODEL_DYNAMIC_UPDATE_FIELDS = [
"api_key",
"model",
"tool_model_enabled",
"base_url",
"model_name",
]
21 changes: 18 additions & 3 deletions src/backend/base/langflow/components/agents/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
from langflow.base.agents.agent import LCToolsAgentComponent
from langflow.base.models.model_input_constants import (
ALL_PROVIDER_FIELDS,
MODEL_DYNAMIC_UPDATE_FIELDS,
MODEL_PROVIDERS_DICT,
)
from langflow.base.models.model_utils import get_model_name
Expand Down Expand Up @@ -144,6 +145,16 @@ def _build_llm_model(self, component, inputs, prefix=""):
model_kwargs = {input_.name: getattr(self, f"{prefix}{input_.name}") for input_ in inputs}
return component.set(**model_kwargs).build_model()

def set_component_params(self, component):
    """Apply the current provider-prefixed input values to *component*.

    Looks up the active provider for ``self.agent_llm``; when none is found,
    the component is returned unchanged.
    """
    provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)
    if not provider_info:
        return component
    inputs = provider_info.get("inputs")
    prefix = provider_info.get("prefix")
    # Each provider input maps to an attribute on self named "<prefix><input name>".
    model_kwargs = {}
    for input_ in inputs:
        model_kwargs[input_.name] = getattr(self, f"{prefix}{input_.name}")
    return component.set(**model_kwargs)

def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:
"""Delete specified fields from build_config."""
for field in fields:
Expand All @@ -164,7 +175,7 @@ async def update_build_config(
) -> dotdict:
# Iterate over all providers in the MODEL_PROVIDERS_DICT
# Existing logic for updating build_config
if field_name == "agent_llm":
if field_name in ("agent_llm",):
provider_info = MODEL_PROVIDERS_DICT.get(field_value)
if provider_info:
component_class = provider_info.get("component_class")
Expand Down Expand Up @@ -233,10 +244,15 @@ async def update_build_config(
if missing_keys:
msg = f"Missing required keys in build_config: {missing_keys}"
raise ValueError(msg)
if isinstance(self.agent_llm, str) and self.agent_llm in MODEL_PROVIDERS_DICT:
if (
isinstance(self.agent_llm, str)
and self.agent_llm in MODEL_PROVIDERS_DICT
and field_name in MODEL_DYNAMIC_UPDATE_FIELDS
):
provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)
if provider_info:
component_class = provider_info.get("component_class")
component_class = self.set_component_params(component_class)
prefix = provider_info.get("prefix")
if component_class and hasattr(component_class, "update_build_config"):
# Call each component class's update_build_config method
Expand All @@ -246,5 +262,4 @@ async def update_build_config(
build_config = await update_component_build_config(
component_class, build_config, field_value, field_name
)

return build_config
Original file line number Diff line number Diff line change
Expand Up @@ -17,11 +17,13 @@ class AssemblyAIGetSubtitles(Component):
name="api_key",
display_name="Assembly API Key",
info="Your AssemblyAI API key. You can get one from https://www.assemblyai.com/",
required=True,
),
DataInput(
name="transcription_result",
display_name="Transcription Result",
info="The transcription result from AssemblyAI",
required=True,
),
DropdownInput(
name="subtitle_format",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,17 +18,15 @@ class AssemblyAILeMUR(Component):
display_name="Assembly API Key",
info="Your AssemblyAI API key. You can get one from https://www.assemblyai.com/",
advanced=False,
required=True,
),
DataInput(
name="transcription_result",
display_name="Transcription Result",
info="The transcription result from AssemblyAI",
required=True,
),
MultilineInput(
name="prompt",
display_name="Input Prompt",
info="The text to prompt the model",
),
MultilineInput(name="prompt", display_name="Input Prompt", info="The text to prompt the model", required=True),
DropdownInput(
name="final_model",
display_name="Final Model",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ class AssemblyAIListTranscripts(Component):
name="api_key",
display_name="Assembly API Key",
info="Your AssemblyAI API key. You can get one from https://www.assemblyai.com/",
required=True,
),
IntInput(
name="limit",
Expand Down
Loading
Loading