From 1684b300b4f490b04ec9f1f25ab2c80a7ae0d047 Mon Sep 17 00:00:00 2001 From: Rodrigo Nader Date: Tue, 24 Dec 2024 16:21:47 -0300 Subject: [PATCH 01/22] =?UTF-8?q?add=20loop=20component=20=F0=9F=8E=81?= =?UTF-8?q?=F0=9F=8E=84?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../base/langflow/components/logic/loop.py | 91 +++++++++++++++++++ 1 file changed, 91 insertions(+) create mode 100644 src/backend/base/langflow/components/logic/loop.py diff --git a/src/backend/base/langflow/components/logic/loop.py b/src/backend/base/langflow/components/logic/loop.py new file mode 100644 index 000000000000..14f427186808 --- /dev/null +++ b/src/backend/base/langflow/components/logic/loop.py @@ -0,0 +1,91 @@ +from langflow.custom import Component +from langflow.io import DataInput, Output +from langflow.schema import Data + + +class IteratorComponent(Component): + display_name = "Loop" + description = ( + "Iterates over a list of Data objects, outputting one item at a time and aggregating results from loop inputs." + ) + icon = "infinity" + + inputs = [ + DataInput( + name="data", + display_name="Data", + info="The initial list of Data objects to iterate over." + ), + DataInput( + name="loop", + display_name="Loop Input", + info="Data to aggregate during the iteration." + ) + ] + + outputs = [ + Output(display_name="Item", name="item", method="item_output"), + Output(display_name="Done", name="done", method="done_output") + ] + + def initialize_data(self): + """Initialize the data list, context index, and aggregated list.""" + if not self.ctx.get(f"{self._id}_initialized", False): + # Ensure data is a list of Data objects + if isinstance(self.data, Data): + data_list = [self.data] + elif isinstance(self.data, list): + data_list = self.data + else: + raise ValueError("The 'data' input must be a list of Data objects or a single Data object.") + + # Store the initial data and context variables + self.update_ctx({ + f"{self._id}_data": data_list, + f"{self._id}_index": 0, + f"{self._id}_aggregated": [], + f"{self._id}_initialized": True + }) + + def item_output(self) -> Data: + """Output the next item in the list.""" + self.initialize_data() + + # Get data list and current index + data_list = self.ctx.get(f"{self._id}_data", []) + current_index = self.ctx.get(f"{self._id}_index", 0) + + if current_index < len(data_list): + # Output current item + current_item = data_list[current_index] + self.update_ctx({f"{self._id}_index": current_index + 1}) + print("item_output:", current_item) + return current_item + else: + # No more items to output + self.stop("item") + return + + def done_output(self) -> Data: + """Return the aggregated list once all items are processed.""" + self.initialize_data() + + # Get data list and aggregated list + data_list = self.ctx.get(f"{self._id}_data", []) + aggregated = self.ctx.get(f"{self._id}_aggregated", []) + + # Check if loop input is provided + loop_input = self.loop + if loop_input: + # Append loop input to aggregated list + aggregated.append(loop_input) + self.update_ctx({f"{self._id}_aggregated": aggregated}) + + # Check if aggregation is complete + if len(aggregated) >= len(data_list): + print("done_output:", aggregated) + return [data for data in aggregated] + else: + # Not all items have been processed yet + self.stop("done") + return From 5afbc4de58a51eec69306995fef1289cf54ac496 Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Tue, 24 Dec 2024 19:23:12 +0000 
Subject: [PATCH 02/22] [autofix.ci] apply automated fixes --- .../base/langflow/components/logic/loop.py | 42 ++++++++----------- 1 file changed, 17 insertions(+), 25 deletions(-) diff --git a/src/backend/base/langflow/components/logic/loop.py b/src/backend/base/langflow/components/logic/loop.py index 14f427186808..efa4de481c3d 100644 --- a/src/backend/base/langflow/components/logic/loop.py +++ b/src/backend/base/langflow/components/logic/loop.py @@ -11,21 +11,13 @@ class IteratorComponent(Component): icon = "infinity" inputs = [ - DataInput( - name="data", - display_name="Data", - info="The initial list of Data objects to iterate over." - ), - DataInput( - name="loop", - display_name="Loop Input", - info="Data to aggregate during the iteration." - ) + DataInput(name="data", display_name="Data", info="The initial list of Data objects to iterate over."), + DataInput(name="loop", display_name="Loop Input", info="Data to aggregate during the iteration."), ] outputs = [ Output(display_name="Item", name="item", method="item_output"), - Output(display_name="Done", name="done", method="done_output") + Output(display_name="Done", name="done", method="done_output"), ] def initialize_data(self): @@ -40,12 +32,14 @@ def initialize_data(self): raise ValueError("The 'data' input must be a list of Data objects or a single Data object.") # Store the initial data and context variables - self.update_ctx({ - f"{self._id}_data": data_list, - f"{self._id}_index": 0, - f"{self._id}_aggregated": [], - f"{self._id}_initialized": True - }) + self.update_ctx( + { + f"{self._id}_data": data_list, + f"{self._id}_index": 0, + f"{self._id}_aggregated": [], + f"{self._id}_initialized": True, + } + ) def item_output(self) -> Data: """Output the next item in the list.""" @@ -61,10 +55,9 @@ def item_output(self) -> Data: self.update_ctx({f"{self._id}_index": current_index + 1}) print("item_output:", current_item) return current_item - else: - # No more items to output - self.stop("item") - return + # No more items to output + self.stop("item") + return None def done_output(self) -> Data: """Return the aggregated list once all items are processed.""" @@ -85,7 +78,6 @@ def done_output(self) -> Data: if len(aggregated) >= len(data_list): print("done_output:", aggregated) return [data for data in aggregated] - else: - # Not all items have been processed yet - self.stop("done") - return + # Not all items have been processed yet + self.stop("done") + return None From c66e6ceb25d8dafacf1ece4d387a21e88aa01ca2 Mon Sep 17 00:00:00 2001 From: Rodrigo Nader Date: Thu, 26 Dec 2024 18:46:26 -0300 Subject: [PATCH 03/22] fix: add loop component to init --- src/backend/base/langflow/components/logic/__init__.py | 2 ++ src/backend/base/langflow/components/logic/loop.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/backend/base/langflow/components/logic/__init__.py b/src/backend/base/langflow/components/logic/__init__.py index 7833224edf4b..d164f68b01e9 100644 --- a/src/backend/base/langflow/components/logic/__init__.py +++ b/src/backend/base/langflow/components/logic/__init__.py @@ -6,6 +6,7 @@ from .pass_message import PassMessageComponent from .run_flow import RunFlowComponent from .sub_flow import SubFlowComponent +from .loop import LoopComponent __all__ = [ "ConditionalRouterComponent", @@ -16,4 +17,5 @@ "PassMessageComponent", "RunFlowComponent", "SubFlowComponent", + "LoopComponent" ] diff --git a/src/backend/base/langflow/components/logic/loop.py b/src/backend/base/langflow/components/logic/loop.py index 
efa4de481c3d..9361a26766e4 100644 --- a/src/backend/base/langflow/components/logic/loop.py +++ b/src/backend/base/langflow/components/logic/loop.py @@ -3,7 +3,7 @@ from langflow.schema import Data -class IteratorComponent(Component): +class LoopComponent(Component): display_name = "Loop" description = ( "Iterates over a list of Data objects, outputting one item at a time and aggregating results from loop inputs." From 3ea787f88f8f88fdc8494f47e65772e6f1dba7eb Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Thu, 26 Dec 2024 21:47:49 +0000 Subject: [PATCH 04/22] [autofix.ci] apply automated fixes --- src/backend/base/langflow/components/logic/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/backend/base/langflow/components/logic/__init__.py b/src/backend/base/langflow/components/logic/__init__.py index d164f68b01e9..40e84cd1dfb9 100644 --- a/src/backend/base/langflow/components/logic/__init__.py +++ b/src/backend/base/langflow/components/logic/__init__.py @@ -2,20 +2,20 @@ from .data_conditional_router import DataConditionalRouterComponent from .flow_tool import FlowToolComponent from .listen import ListenComponent +from .loop import LoopComponent from .notify import NotifyComponent from .pass_message import PassMessageComponent from .run_flow import RunFlowComponent from .sub_flow import SubFlowComponent -from .loop import LoopComponent __all__ = [ "ConditionalRouterComponent", "DataConditionalRouterComponent", "FlowToolComponent", "ListenComponent", + "LoopComponent", "NotifyComponent", "PassMessageComponent", "RunFlowComponent", "SubFlowComponent", - "LoopComponent" ] From 85d2381c24ee226c25a9b2d6e8f9c96fddce948b Mon Sep 17 00:00:00 2001 From: Gabriel Luiz Freitas Almeida Date: Mon, 6 Jan 2025 15:00:49 -0300 Subject: [PATCH 05/22] refactor(loop): rename loop input variable and improve code quality - Renamed 'loop' input to 'loop_input' for clarity. - Simplified logic for checking loop input and aggregating results. - Enhanced type hints for better code readability and maintainability. 
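[Editor's note, not part of the patch series] The refactor described above renames the aggregation input to loop_input and simplifies how results are collected. The following is a minimal, self-contained sketch of the iterate/aggregate contract the component implements: it uses a plain dict in place of Langflow's component context (ctx/update_ctx) and a bare Data stand-in, so every name here is an illustrative assumption rather than the library's API.

from dataclasses import dataclass


@dataclass
class Data:
    """Stand-in for langflow.schema.Data; only the text payload is modeled."""
    text: str


class LoopSketch:
    """Illustrative loop: emit one item per pass, collect processed results."""

    def __init__(self, data: list[Data]) -> None:
        # Mirrors the f"{self._id}_data" / _index / _aggregated context keys.
        self.ctx = {"data": data, "index": 0, "aggregated": []}

    def item_output(self) -> Data | None:
        """Return the next item, or None when the list is exhausted."""
        i = self.ctx["index"]
        if i < len(self.ctx["data"]):
            self.ctx["index"] = i + 1
            return self.ctx["data"][i]
        return None  # the real component calls self.stop("item") here

    def done_output(self, loop_input: Data) -> list[Data] | None:
        """Aggregate one processed item; return the list once all are in."""
        self.ctx["aggregated"].append(loop_input)
        if len(self.ctx["aggregated"]) >= len(self.ctx["data"]):
            return self.ctx["aggregated"]
        return None  # the real component calls self.stop("done") here


# Usage sketch: the graph engine would route each emitted item through the loop
# body and feed the result back in as loop_input until done_output yields the
# aggregated list.
loop = LoopSketch([Data("a"), Data("b"), Data("c")])
results = None
while (item := loop.item_output()) is not None:
    processed = Data(item.text.upper())  # stand-in for the loop body
    results = loop.done_output(processed)
print(results)  # [Data(text='A'), Data(text='B'), Data(text='C')]
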
--- .../base/langflow/components/logic/loop.py | 66 ++++++++++--------- 1 file changed, 35 insertions(+), 31 deletions(-) diff --git a/src/backend/base/langflow/components/logic/loop.py b/src/backend/base/langflow/components/logic/loop.py index 9361a26766e4..d217b69305e7 100644 --- a/src/backend/base/langflow/components/logic/loop.py +++ b/src/backend/base/langflow/components/logic/loop.py @@ -12,7 +12,7 @@ class LoopComponent(Component): inputs = [ DataInput(name="data", display_name="Data", info="The initial list of Data objects to iterate over."), - DataInput(name="loop", display_name="Loop Input", info="Data to aggregate during the iteration."), + DataInput(name="loop_input", display_name="Loop Input", info="Data to aggregate during the iteration."), ] outputs = [ @@ -22,39 +22,45 @@ class LoopComponent(Component): def initialize_data(self): """Initialize the data list, context index, and aggregated list.""" - if not self.ctx.get(f"{self._id}_initialized", False): - # Ensure data is a list of Data objects - if isinstance(self.data, Data): - data_list = [self.data] - elif isinstance(self.data, list): - data_list = self.data - else: - raise ValueError("The 'data' input must be a list of Data objects or a single Data object.") - - # Store the initial data and context variables - self.update_ctx( - { - f"{self._id}_data": data_list, - f"{self._id}_index": 0, - f"{self._id}_aggregated": [], - f"{self._id}_initialized": True, - } - ) + if self.ctx.get(f"{self._id}_initialized", False): + return + + # Ensure data is a list of Data objects + if isinstance(self.data, Data): + data_list: list[Data] = [self.data] + elif isinstance(self.data, list): + if not all(isinstance(item, Data) for item in self.data): + msg = "All items in the data list must be Data objects." + raise TypeError(msg) + data_list = self.data + else: + msg = "The 'data' input must be a list of Data objects or a single Data object." 
+ raise TypeError(msg) + + # Store the initial data and context variables + self.update_ctx( + { + f"{self._id}_data": data_list, + f"{self._id}_index": 0, + f"{self._id}_aggregated": [], + f"{self._id}_initialized": True, + } + ) def item_output(self) -> Data: """Output the next item in the list.""" self.initialize_data() # Get data list and current index - data_list = self.ctx.get(f"{self._id}_data", []) - current_index = self.ctx.get(f"{self._id}_index", 0) + data_list: list[Data] = self.ctx.get(f"{self._id}_data", []) + current_index: int = self.ctx.get(f"{self._id}_index", 0) if current_index < len(data_list): - # Output current item - current_item = data_list[current_index] + # Output current item and increment index + current_item: Data = data_list[current_index] self.update_ctx({f"{self._id}_index": current_index + 1}) - print("item_output:", current_item) return current_item + # No more items to output self.stop("item") return None @@ -67,17 +73,15 @@ def done_output(self) -> Data: data_list = self.ctx.get(f"{self._id}_data", []) aggregated = self.ctx.get(f"{self._id}_aggregated", []) - # Check if loop input is provided - loop_input = self.loop - if loop_input: - # Append loop input to aggregated list - aggregated.append(loop_input) + # Check if loop input is provided and append to aggregated list + if self.loop_input is not None: + aggregated.append(self.loop_input) self.update_ctx({f"{self._id}_aggregated": aggregated}) # Check if aggregation is complete if len(aggregated) >= len(data_list): - print("done_output:", aggregated) - return [data for data in aggregated] + return aggregated + # Not all items have been processed yet self.stop("done") return None From 23856234d1b26c1acc8e3f58a4a9dd3bba5bb070 Mon Sep 17 00:00:00 2001 From: Gabriel Luiz Freitas Almeida Date: Mon, 6 Jan 2025 15:12:36 -0300 Subject: [PATCH 06/22] refactor(loop): add type hint to initialize_data method for improved clarity --- src/backend/base/langflow/components/logic/loop.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/backend/base/langflow/components/logic/loop.py b/src/backend/base/langflow/components/logic/loop.py index d217b69305e7..14f5a9017763 100644 --- a/src/backend/base/langflow/components/logic/loop.py +++ b/src/backend/base/langflow/components/logic/loop.py @@ -20,7 +20,7 @@ class LoopComponent(Component): Output(display_name="Done", name="done", method="done_output"), ] - def initialize_data(self): + def initialize_data(self) -> None: """Initialize the data list, context index, and aggregated list.""" if self.ctx.get(f"{self._id}_initialized", False): return From cc080fdc6191ca504228524dd0be0514fd107ffa Mon Sep 17 00:00:00 2001 From: italojohnny Date: Tue, 14 Jan 2025 10:32:52 -0300 Subject: [PATCH 07/22] fix: mypy error incompatible return value type --- src/backend/base/langflow/components/logic/loop.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/backend/base/langflow/components/logic/loop.py b/src/backend/base/langflow/components/logic/loop.py index 14f5a9017763..528ab4ec15fa 100644 --- a/src/backend/base/langflow/components/logic/loop.py +++ b/src/backend/base/langflow/components/logic/loop.py @@ -63,7 +63,7 @@ def item_output(self) -> Data: # No more items to output self.stop("item") - return None + return None # type: ignore [return-value] def done_output(self) -> Data: """Return the aggregated list once all items are processed.""" @@ -84,4 +84,4 @@ def done_output(self) -> Data: # Not all items have been processed yet 
self.stop("done") - return None + return None # type: ignore [return-value] From 8ebe5807667b8881bfa08a1e70a96a01e61f9674 Mon Sep 17 00:00:00 2001 From: Edwin Jose Date: Tue, 14 Jan 2025 10:17:06 -0500 Subject: [PATCH 08/22] feat: adds test cases for loop component compatibility with the APIs, Loop component updates to support API (#5615) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * add loop component 🎁🎄 * [autofix.ci] apply automated fixes * fix: add loop component to init * [autofix.ci] apply automated fixes * refactor(loop): rename loop input variable and improve code quality - Renamed 'loop' input to 'loop_input' for clarity. - Simplified logic for checking loop input and aggregating results. - Enhanced type hints for better code readability and maintainability. * refactor(loop): add type hint to initialize_data method for improved clarity * adding test * test cases added * Update test_loop.py * adding test * test cases added * Update test_loop.py * update with the new test case method! * Update test_loop.py * tests updates * Update loop.py * update fix * issues loop issues * reverting debug mode params * solves lint errors and fix the tests * fix: mypy error incompatible return value type * [autofix.ci] apply automated fixes --------- Co-authored-by: Rodrigo Nader Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> Co-authored-by: Gabriel Luiz Freitas Almeida Co-authored-by: italojohnny --- .../base/langflow/components/logic/loop.py | 81 +- .../custom_component/custom_component.py | 15 + src/backend/tests/conftest.py | 7 + src/backend/tests/data/LoopTest.json | 1624 +++++++++++++++++ .../tests/unit/components/logic/__init__.py | 0 .../tests/unit/components/logic/test_loop.py | 90 + 6 files changed, 1787 insertions(+), 30 deletions(-) create mode 100644 src/backend/tests/data/LoopTest.json create mode 100644 src/backend/tests/unit/components/logic/__init__.py create mode 100644 src/backend/tests/unit/components/logic/test_loop.py diff --git a/src/backend/base/langflow/components/logic/loop.py b/src/backend/base/langflow/components/logic/loop.py index 528ab4ec15fa..258ccac7468f 100644 --- a/src/backend/base/langflow/components/logic/loop.py +++ b/src/backend/base/langflow/components/logic/loop.py @@ -26,16 +26,7 @@ def initialize_data(self) -> None: return # Ensure data is a list of Data objects - if isinstance(self.data, Data): - data_list: list[Data] = [self.data] - elif isinstance(self.data, list): - if not all(isinstance(item, Data) for item in self.data): - msg = "All items in the data list must be Data objects." - raise TypeError(msg) - data_list = self.data - else: - msg = "The 'data' input must be a list of Data objects or a single Data object." - raise TypeError(msg) + data_list = self._validate_data(self.data) # Store the initial data and context variables self.update_ctx( @@ -47,25 +38,62 @@ def initialize_data(self) -> None: } ) + def _validate_data(self, data): + """Validate and return a list of Data objects.""" + if isinstance(data, Data): + return [data] + if isinstance(data, list) and all(isinstance(item, Data) for item in data): + return data + msg = "The 'data' input must be a list of Data objects or a single Data object." 
+ raise TypeError(msg) + + def evaluate_stop_loop(self) -> bool: + """Evaluate whether to stop item or done output.""" + current_index = self.ctx.get(f"{self._id}_index", 0) + data_length = len(self.ctx.get(f"{self._id}_data", [])) + return current_index > data_length + def item_output(self) -> Data: - """Output the next item in the list.""" + """Output the next item in the list or stop if done.""" self.initialize_data() + current_item = Data(text="") - # Get data list and current index - data_list: list[Data] = self.ctx.get(f"{self._id}_data", []) - current_index: int = self.ctx.get(f"{self._id}_index", 0) + if self.evaluate_stop_loop(): + self.stop("item") + return Data(text="") + # Get data list and current index + data_list, current_index = self.loop_variables() if current_index < len(data_list): # Output current item and increment index - current_item: Data = data_list[current_index] - self.update_ctx({f"{self._id}_index": current_index + 1}) - return current_item - - # No more items to output - self.stop("item") - return None # type: ignore [return-value] + try: + current_item = data_list[current_index] + except IndexError: + current_item = Data(text="") + self.aggregated_output() + self.update_ctx({f"{self._id}_index": current_index + 1}) + return current_item def done_output(self) -> Data: + """Trigger the done output when iteration is complete.""" + self.initialize_data() + + if self.evaluate_stop_loop(): + self.stop("item") + self.start("done") + + return self.ctx.get(f"{self._id}_aggregated", []) + self.stop("done") + return Data(text="") + + def loop_variables(self): + """Retrieve loop variables from context.""" + return ( + self.ctx.get(f"{self._id}_data", []), + self.ctx.get(f"{self._id}_index", 0), + ) + + def aggregated_output(self) -> Data: """Return the aggregated list once all items are processed.""" self.initialize_data() @@ -74,14 +102,7 @@ def done_output(self) -> Data: aggregated = self.ctx.get(f"{self._id}_aggregated", []) # Check if loop input is provided and append to aggregated list - if self.loop_input is not None: + if self.loop_input is not None and not isinstance(self.loop_input, str) and len(aggregated) <= len(data_list): aggregated.append(self.loop_input) self.update_ctx({f"{self._id}_aggregated": aggregated}) - - # Check if aggregation is complete - if len(aggregated) >= len(data_list): - return aggregated - - # Not all items have been processed yet - self.stop("done") - return None # type: ignore [return-value] + return aggregated diff --git a/src/backend/base/langflow/custom/custom_component/custom_component.py b/src/backend/base/langflow/custom/custom_component/custom_component.py index 9cb9e152322a..9107dcc54a5d 100644 --- a/src/backend/base/langflow/custom/custom_component/custom_component.py +++ b/src/backend/base/langflow/custom/custom_component/custom_component.py @@ -138,6 +138,21 @@ def stop(self, output_name: str | None = None) -> None: msg = f"Error stopping {self.display_name}: {e}" raise ValueError(msg) from e + def start(self, output_name: str | None = None) -> None: + if not output_name and self._vertex and len(self._vertex.outputs) == 1: + output_name = self._vertex.outputs[0]["name"] + elif not output_name: + msg = "You must specify an output name to call start" + raise ValueError(msg) + if not self._vertex: + msg = "Vertex is not set" + raise ValueError(msg) + try: + self.graph.mark_branch(vertex_id=self._vertex.id, output_name=output_name, state="ACTIVE") + except Exception as e: + msg = f"Error starting {self.display_name}: {e}" + 
raise ValueError(msg) from e + def append_state(self, name: str, value: Any) -> None: if not self._vertex: msg = "Vertex is not set" diff --git a/src/backend/tests/conftest.py b/src/backend/tests/conftest.py index 5e67b7196004..1b77e8b838e1 100644 --- a/src/backend/tests/conftest.py +++ b/src/backend/tests/conftest.py @@ -103,6 +103,7 @@ def pytest_configure(config): pytest.VECTOR_STORE_PATH = data_path / "Vector_store.json" pytest.SIMPLE_API_TEST = data_path / "SimpleAPITest.json" pytest.MEMORY_CHATBOT_NO_LLM = data_path / "MemoryChatbotNoLLM.json" + pytest.LOOP_TEST = data_path / "LoopTest.json" pytest.CODE_WITH_SYNTAX_ERROR = """ def get_text(): retun "Hello World" @@ -121,6 +122,7 @@ def get_text(): pytest.TWO_OUTPUTS, pytest.VECTOR_STORE_PATH, pytest.MEMORY_CHATBOT_NO_LLM, + pytest.LOOP_TEST, ]: assert path.exists(), f"File {path} does not exist. Available files: {list(data_path.iterdir())}" @@ -324,6 +326,11 @@ def json_memory_chatbot_no_llm(): return pytest.MEMORY_CHATBOT_NO_LLM.read_text(encoding="utf-8") +@pytest.fixture +def json_loop_test(): + return pytest.LOOP_TEST.read_text(encoding="utf-8") + + @pytest.fixture(autouse=True) def deactivate_tracing(monkeypatch): monkeypatch.setenv("LANGFLOW_DEACTIVATE_TRACING", "true") diff --git a/src/backend/tests/data/LoopTest.json b/src/backend/tests/data/LoopTest.json new file mode 100644 index 000000000000..ed31ebe8d04c --- /dev/null +++ b/src/backend/tests/data/LoopTest.json @@ -0,0 +1,1624 @@ +{ + "id": "713390de-f9b7-4c4b-b3c4-2678a9973ea3", + "data": { + "nodes": [ + { + "id": "ParseData-HI264", + "type": "genericNode", + "position": { + "x": 1519.4837108212814, + "y": 724.0614553725009 + }, + "data": { + "node": { + "template": { + "_type": "Component", + "data": { + "tool_mode": false, + "trace_as_metadata": true, + "list": true, + "trace_as_input": true, + "required": false, + "placeholder": "", + "show": true, + "name": "data", + "value": "", + "display_name": "Data", + "advanced": false, + "input_types": [ + "Data" + ], + "dynamic": false, + "info": "The data to convert to text.", + "title_case": false, + "type": "other", + "_input_type": "DataInput" + }, + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "from langflow.custom import Component\nfrom langflow.helpers.data import data_to_text, data_to_text_list\nfrom langflow.io import DataInput, MultilineInput, Output, StrInput\nfrom langflow.schema import Data\nfrom langflow.schema.message import Message\n\n\nclass ParseDataComponent(Component):\n display_name = \"Parse Data\"\n description = \"Convert Data into plain text following a specified template.\"\n icon = \"braces\"\n name = \"ParseData\"\n\n inputs = [\n DataInput(name=\"data\", display_name=\"Data\", info=\"The data to convert to text.\", is_list=True),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. 
\"\n \"It can contain the keys {text}, {data} or any other key in the Data.\",\n value=\"{text}\",\n ),\n StrInput(name=\"sep\", display_name=\"Separator\", advanced=True, value=\"\\n\"),\n ]\n\n outputs = [\n Output(\n display_name=\"Text\",\n name=\"text\",\n info=\"Data as a single Message, with each input Data separated by Separator\",\n method=\"parse_data\",\n ),\n Output(\n display_name=\"Data List\",\n name=\"data_list\",\n info=\"Data as a list of new Data, each having `text` formatted by Template\",\n method=\"parse_data_as_list\",\n ),\n ]\n\n def _clean_args(self) -> tuple[list[Data], str, str]:\n data = self.data if isinstance(self.data, list) else [self.data]\n template = self.template\n sep = self.sep\n return data, template, sep\n\n def parse_data(self) -> Message:\n data, template, sep = self._clean_args()\n result_string = data_to_text(template, data, sep)\n self.status = result_string\n return Message(text=result_string)\n\n def parse_data_as_list(self) -> list[Data]:\n data, template, _ = self._clean_args()\n text_list, data_list = data_to_text_list(template, data)\n for item, text in zip(data_list, text_list, strict=True):\n item.set_text(text)\n self.status = data_list\n return data_list\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", + "advanced": true, + "dynamic": true, + "info": "", + "load_from_db": false, + "title_case": false + }, + "sep": { + "tool_mode": false, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "sep", + "value": "\n", + "display_name": "Separator", + "advanced": true, + "dynamic": false, + "info": "", + "title_case": false, + "type": "str", + "_input_type": "StrInput" + }, + "template": { + "tool_mode": false, + "trace_as_input": true, + "multiline": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "template", + "value": "{text}", + "display_name": "Template", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The template to use for formatting the data. 
It can contain the keys {text}, {data} or any other key in the Data.", + "title_case": false, + "type": "str", + "_input_type": "MultilineInput" + } + }, + "description": "Convert Data into plain text following a specified template.", + "icon": "braces", + "base_classes": [ + "Data", + "Message" + ], + "display_name": "Parse Data", + "documentation": "", + "minimized": false, + "custom_fields": {}, + "output_types": [], + "pinned": false, + "conditional_paths": [], + "frozen": false, + "outputs": [ + { + "types": [ + "Message" + ], + "selected": "Message", + "name": "text", + "display_name": "Text", + "method": "parse_data", + "value": "__UNDEFINED__", + "cache": true + }, + { + "types": [ + "Data" + ], + "selected": "Data", + "name": "data_list", + "display_name": "Data List", + "method": "parse_data_as_list", + "value": "__UNDEFINED__", + "cache": true, + "hidden": true + } + ], + "field_order": [ + "data", + "template", + "sep" + ], + "beta": false, + "legacy": false, + "edited": false, + "metadata": {}, + "tool_mode": false, + "category": "processing", + "key": "ParseData", + "score": 0.007568328950209746, + "lf_version": "1.1.1" + }, + "showNode": true, + "type": "ParseData", + "id": "ParseData-HI264" + }, + "selected": false, + "measured": { + "width": 320, + "height": 294 + }, + "dragging": false + }, + { + "id": "MessagetoData-02q2m", + "type": "genericNode", + "position": { + "x": 828.0748930410606, + "y": 444.7212170217783 + }, + "data": { + "node": { + "template": { + "_type": "Component", + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "from loguru import logger\n\nfrom langflow.custom import Component\nfrom langflow.io import MessageInput, Output\nfrom langflow.schema import Data\nfrom langflow.schema.message import Message\n\n\nclass MessageToDataComponent(Component):\n display_name = \"Message to Data\"\n description = \"Convert a Message object to a Data object\"\n icon = \"message-square-share\"\n beta = True\n name = \"MessagetoData\"\n\n inputs = [\n MessageInput(\n name=\"message\",\n display_name=\"Message\",\n info=\"The Message object to convert to a Data object\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Data\", name=\"data\", method=\"convert_message_to_data\"),\n ]\n\n def convert_message_to_data(self) -> Data:\n if isinstance(self.message, Message):\n # Convert Message to Data\n return Data(data=self.message.data)\n\n msg = \"Error converting Message to Data: Input must be a Message object\"\n logger.opt(exception=True).debug(msg)\n self.status = msg\n return Data(data={\"error\": msg})\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", + "advanced": true, + "dynamic": true, + "info": "", + "load_from_db": false, + "title_case": false + }, + "message": { + "trace_as_input": true, + "tool_mode": false, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "message", + "value": "", + "display_name": "Message", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The Message object to convert to a Data object", + "title_case": false, + "type": "str", + "_input_type": "MessageInput" + } + }, + "description": "Convert a Message object to a Data object", + "icon": "message-square-share", + "base_classes": [ + "Data" + ], + "display_name": "Message to Data", + "documentation": "", + "minimized": false, + "custom_fields": {}, + 
"output_types": [], + "pinned": false, + "conditional_paths": [], + "frozen": false, + "outputs": [ + { + "types": [ + "Data" + ], + "selected": "Data", + "name": "data", + "display_name": "Data", + "method": "convert_message_to_data", + "value": "__UNDEFINED__", + "cache": true + } + ], + "field_order": [ + "message" + ], + "beta": true, + "legacy": false, + "edited": false, + "metadata": {}, + "tool_mode": false, + "lf_version": "1.1.1" + }, + "showNode": true, + "type": "MessagetoData", + "id": "MessagetoData-02q2m" + }, + "selected": false, + "measured": { + "width": 320, + "height": 230 + }, + "dragging": false + }, + { + "id": "MessagetoData-dHnzn", + "type": "genericNode", + "position": { + "x": -334.897840488358, + "y": 553.0914016416309 + }, + "data": { + "node": { + "template": { + "_type": "Component", + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "from loguru import logger\n\nfrom langflow.custom import Component\nfrom langflow.io import MessageInput, Output\nfrom langflow.schema import Data\nfrom langflow.schema.message import Message\n\n\nclass MessageToDataComponent(Component):\n display_name = \"Message to Data\"\n description = \"Convert a Message object to a Data object\"\n icon = \"message-square-share\"\n beta = True\n name = \"MessagetoData\"\n\n inputs = [\n MessageInput(\n name=\"message\",\n display_name=\"Message\",\n info=\"The Message object to convert to a Data object\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Data\", name=\"data\", method=\"convert_message_to_data\"),\n ]\n\n def convert_message_to_data(self) -> Data:\n if isinstance(self.message, Message):\n # Convert Message to Data\n return Data(data=self.message.data)\n\n msg = \"Error converting Message to Data: Input must be a Message object\"\n logger.opt(exception=True).debug(msg)\n self.status = msg\n return Data(data={\"error\": msg})\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", + "advanced": true, + "dynamic": true, + "info": "", + "load_from_db": false, + "title_case": false + }, + "message": { + "trace_as_input": true, + "tool_mode": false, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "message", + "value": "", + "display_name": "Message", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The Message object to convert to a Data object", + "title_case": false, + "type": "str", + "_input_type": "MessageInput" + } + }, + "description": "Convert a Message object to a Data object", + "icon": "message-square-share", + "base_classes": [ + "Data" + ], + "display_name": "Message to Data", + "documentation": "", + "minimized": false, + "custom_fields": {}, + "output_types": [], + "pinned": false, + "conditional_paths": [], + "frozen": false, + "outputs": [ + { + "types": [ + "Data" + ], + "selected": "Data", + "name": "data", + "display_name": "Data", + "method": "convert_message_to_data", + "value": "__UNDEFINED__", + "cache": true + } + ], + "field_order": [ + "message" + ], + "beta": true, + "legacy": false, + "edited": false, + "metadata": {}, + "tool_mode": false, + "lf_version": "1.1.1" + }, + "showNode": true, + "type": "MessagetoData", + "id": "MessagetoData-dHnzn" + }, + "selected": false, + "measured": { + "width": 320, + "height": 230 + }, + "dragging": false + }, + { + "id": "ChatInput-dOD8A", + "type": "genericNode", + "position": { + 
"x": -780.5070511367146, + "y": 477.3880139482486 + }, + "data": { + "node": { + "template": { + "_type": "Component", + "files": { + "trace_as_metadata": true, + "file_path": "", + "fileTypes": [ + "txt", + "md", + "mdx", + "csv", + "json", + "yaml", + "yml", + "xml", + "html", + "htm", + "pdf", + "docx", + "py", + "sh", + "sql", + "js", + "ts", + "tsx", + "jpg", + "jpeg", + "png", + "bmp", + "image" + ], + "list": true, + "required": false, + "placeholder": "", + "show": true, + "name": "files", + "value": "", + "display_name": "Files", + "advanced": true, + "dynamic": false, + "info": "Files to be sent with the message.", + "title_case": false, + "type": "file", + "_input_type": "FileInput" + }, + "background_color": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "background_color", + "value": "", + "display_name": "Background Color", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The background color of the icon.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "chat_icon": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "chat_icon", + "value": "", + "display_name": "Icon", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The icon of the message.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. 
If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", + "advanced": true, + "dynamic": true, + "info": "", + "load_from_db": false, + "title_case": false + }, + "input_value": { + "tool_mode": false, + "trace_as_input": true, + "multiline": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "input_value", + "value": "Sentence 1. Sentence 2. Sentence 3", + "display_name": "Text", + "advanced": false, + "input_types": [], + "dynamic": false, + "info": "Message to be passed as input.", + "title_case": false, + "type": "str", + "_input_type": "MultilineInput" + }, + "sender": { + "tool_mode": false, + "trace_as_metadata": true, + "options": [ + "Machine", + "User" + ], + "combobox": false, + "required": false, + "placeholder": "", + "show": true, + "name": "sender", + "value": "User", + "display_name": "Sender Type", + "advanced": true, + "dynamic": false, + "info": "Type of sender.", + "title_case": false, + "type": "str", + "_input_type": "DropdownInput" + }, + "sender_name": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "sender_name", + "value": "User", + "display_name": "Sender Name", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "Name of the sender.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "session_id": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "session_id", + "value": "", + "display_name": "Session ID", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The session ID of the chat. 
If empty, the current session ID parameter will be used.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "should_store_message": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "should_store_message", + "value": true, + "display_name": "Store Messages", + "advanced": true, + "dynamic": false, + "info": "Store the message in the history.", + "title_case": false, + "type": "bool", + "_input_type": "BoolInput" + }, + "text_color": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "text_color", + "value": "", + "display_name": "Text Color", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The text color of the name", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + } + }, + "description": "Get chat inputs from the Playground.", + "icon": "MessagesSquare", + "base_classes": [ + "Message" + ], + "display_name": "Chat Input", + "documentation": "", + "minimized": true, + "custom_fields": {}, + "output_types": [], + "pinned": false, + "conditional_paths": [], + "frozen": false, + "outputs": [ + { + "types": [ + "Message" + ], + "selected": "Message", + "name": "message", + "display_name": "Message", + "method": "message_response", + "value": "__UNDEFINED__", + "cache": true + } + ], + "field_order": [ + "input_value", + "should_store_message", + "sender", + "sender_name", + "session_id", + "files", + "background_color", + "chat_icon", + "text_color" + ], + "beta": false, + "legacy": false, + "edited": false, + "metadata": {}, + "tool_mode": false, + "lf_version": "1.1.1" + }, + "showNode": true, + "type": "ChatInput", + "id": "ChatInput-dOD8A" + }, + "selected": false, + "measured": { + "width": 320, + "height": 230 + }, + "dragging": false + }, + { + "id": "SplitText-d7abl", + "type": "genericNode", + "position": { + "x": 37.5698068780533, + "y": 627.736322287764 + }, + "data": { + "node": { + "template": { + "_type": "Component", + "data_inputs": { + "trace_as_metadata": true, + "list": true, + "required": true, + "placeholder": "", + "show": true, + "name": "data_inputs", + "value": "", + "display_name": "Data Inputs", + "advanced": false, + "input_types": [ + "Data" + ], + "dynamic": false, + "info": "The data to split.", + "title_case": false, + "type": "other", + "_input_type": "HandleInput" + }, + "chunk_overlap": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "chunk_overlap", + "value": 0, + "display_name": "Chunk Overlap", + "advanced": false, + "dynamic": false, + "info": "Number of characters to overlap between chunks.", + "title_case": false, + "type": "int", + "_input_type": "IntInput" + }, + "chunk_size": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "chunk_size", + "value": 10, + "display_name": "Chunk Size", + "advanced": false, + "dynamic": false, + "info": "The maximum number of characters in each chunk.", + "title_case": false, + "type": "int", + "_input_type": "IntInput" + }, + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "from langchain_text_splitters import 
CharacterTextSplitter\n\nfrom langflow.custom import Component\nfrom langflow.io import HandleInput, IntInput, MessageTextInput, Output\nfrom langflow.schema import Data, DataFrame\nfrom langflow.utils.util import unescape_string\n\n\nclass SplitTextComponent(Component):\n display_name: str = \"Split Text\"\n description: str = \"Split text into chunks based on specified criteria.\"\n icon = \"scissors-line-dashed\"\n name = \"SplitText\"\n\n inputs = [\n HandleInput(\n name=\"data_inputs\",\n display_name=\"Data Inputs\",\n info=\"The data to split.\",\n input_types=[\"Data\"],\n is_list=True,\n required=True,\n ),\n IntInput(\n name=\"chunk_overlap\",\n display_name=\"Chunk Overlap\",\n info=\"Number of characters to overlap between chunks.\",\n value=200,\n ),\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n info=\"The maximum number of characters in each chunk.\",\n value=1000,\n ),\n MessageTextInput(\n name=\"separator\",\n display_name=\"Separator\",\n info=\"The character to split on. Defaults to newline.\",\n value=\"\\n\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Chunks\", name=\"chunks\", method=\"split_text\"),\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"as_dataframe\"),\n ]\n\n def _docs_to_data(self, docs):\n return [Data(text=doc.page_content, data=doc.metadata) for doc in docs]\n\n def split_text(self) -> list[Data]:\n separator = unescape_string(self.separator)\n\n documents = [_input.to_lc_document() for _input in self.data_inputs if isinstance(_input, Data)]\n\n splitter = CharacterTextSplitter(\n chunk_overlap=self.chunk_overlap,\n chunk_size=self.chunk_size,\n separator=separator,\n )\n docs = splitter.split_documents(documents)\n data = self._docs_to_data(docs)\n self.status = data\n return data\n\n def as_dataframe(self) -> DataFrame:\n return DataFrame(self.split_text())\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", + "advanced": true, + "dynamic": true, + "info": "", + "load_from_db": false, + "title_case": false + }, + "separator": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "separator", + "value": ".", + "display_name": "Separator", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The character to split on. 
Defaults to newline.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + } + }, + "description": "Split text into chunks based on specified criteria.", + "icon": "scissors-line-dashed", + "base_classes": [ + "Data", + "DataFrame" + ], + "display_name": "Split Text", + "documentation": "", + "minimized": false, + "custom_fields": {}, + "output_types": [], + "pinned": false, + "conditional_paths": [], + "frozen": false, + "outputs": [ + { + "types": [ + "Data" + ], + "selected": "Data", + "name": "chunks", + "display_name": "Chunks", + "method": "split_text", + "value": "__UNDEFINED__", + "cache": true + }, + { + "types": [ + "DataFrame" + ], + "selected": "DataFrame", + "name": "dataframe", + "display_name": "DataFrame", + "method": "as_dataframe", + "value": "__UNDEFINED__", + "cache": true + } + ], + "field_order": [ + "data_inputs", + "chunk_overlap", + "chunk_size", + "separator" + ], + "beta": false, + "legacy": false, + "edited": false, + "metadata": {}, + "tool_mode": false, + "lf_version": "1.1.1" + }, + "showNode": true, + "type": "SplitText", + "id": "SplitText-d7abl" + }, + "selected": false, + "measured": { + "width": 320, + "height": 507 + }, + "dragging": false + }, + { + "id": "ParseData-Jsbbl", + "type": "genericNode", + "position": { + "x": 1515, + "y": 1290 + }, + "data": { + "node": { + "template": { + "_type": "Component", + "data": { + "tool_mode": false, + "trace_as_metadata": true, + "list": true, + "trace_as_input": true, + "required": false, + "placeholder": "", + "show": true, + "name": "data", + "value": "", + "display_name": "Data", + "advanced": false, + "input_types": [ + "Data" + ], + "dynamic": false, + "info": "The data to convert to text.", + "title_case": false, + "type": "other", + "_input_type": "DataInput" + }, + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "from langflow.custom import Component\nfrom langflow.helpers.data import data_to_text, data_to_text_list\nfrom langflow.io import DataInput, MultilineInput, Output, StrInput\nfrom langflow.schema import Data\nfrom langflow.schema.message import Message\n\n\nclass ParseDataComponent(Component):\n display_name = \"Parse Data\"\n description = \"Convert Data into plain text following a specified template.\"\n icon = \"braces\"\n name = \"ParseData\"\n\n inputs = [\n DataInput(name=\"data\", display_name=\"Data\", info=\"The data to convert to text.\", is_list=True),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. 
\"\n \"It can contain the keys {text}, {data} or any other key in the Data.\",\n value=\"{text}\",\n ),\n StrInput(name=\"sep\", display_name=\"Separator\", advanced=True, value=\"\\n\"),\n ]\n\n outputs = [\n Output(\n display_name=\"Text\",\n name=\"text\",\n info=\"Data as a single Message, with each input Data separated by Separator\",\n method=\"parse_data\",\n ),\n Output(\n display_name=\"Data List\",\n name=\"data_list\",\n info=\"Data as a list of new Data, each having `text` formatted by Template\",\n method=\"parse_data_as_list\",\n ),\n ]\n\n def _clean_args(self) -> tuple[list[Data], str, str]:\n data = self.data if isinstance(self.data, list) else [self.data]\n template = self.template\n sep = self.sep\n return data, template, sep\n\n def parse_data(self) -> Message:\n data, template, sep = self._clean_args()\n result_string = data_to_text(template, data, sep)\n self.status = result_string\n return Message(text=result_string)\n\n def parse_data_as_list(self) -> list[Data]:\n data, template, _ = self._clean_args()\n text_list, data_list = data_to_text_list(template, data)\n for item, text in zip(data_list, text_list, strict=True):\n item.set_text(text)\n self.status = data_list\n return data_list\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", + "advanced": true, + "dynamic": true, + "info": "", + "load_from_db": false, + "title_case": false + }, + "sep": { + "tool_mode": false, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "sep", + "value": "\n", + "display_name": "Separator", + "advanced": true, + "dynamic": false, + "info": "", + "title_case": false, + "type": "str", + "_input_type": "StrInput" + }, + "template": { + "tool_mode": false, + "trace_as_input": true, + "multiline": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "template", + "value": "{text}", + "display_name": "Template", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The template to use for formatting the data. 
It can contain the keys {text}, {data} or any other key in the Data.", + "title_case": false, + "type": "str", + "_input_type": "MultilineInput" + } + }, + "description": "Convert Data into plain text following a specified template.", + "icon": "braces", + "base_classes": [ + "Data", + "Message" + ], + "display_name": "Parse Data", + "documentation": "", + "minimized": false, + "custom_fields": {}, + "output_types": [], + "pinned": false, + "conditional_paths": [], + "frozen": false, + "outputs": [ + { + "types": [ + "Message" + ], + "selected": "Message", + "name": "text", + "display_name": "Text", + "method": "parse_data", + "value": "__UNDEFINED__", + "cache": true + }, + { + "types": [ + "Data" + ], + "selected": "Data", + "name": "data_list", + "display_name": "Data List", + "method": "parse_data_as_list", + "value": "__UNDEFINED__", + "cache": true + } + ], + "field_order": [ + "data", + "template", + "sep" + ], + "beta": false, + "legacy": false, + "edited": false, + "metadata": {}, + "tool_mode": false, + "category": "processing", + "key": "ParseData", + "score": 0.007568328950209746, + "lf_version": "1.1.1" + }, + "showNode": true, + "type": "ParseData", + "id": "ParseData-Jsbbl" + }, + "selected": false, + "measured": { + "width": 320, + "height": 342 + } + }, + { + "id": "ChatOutput-HZiAI", + "type": "genericNode", + "position": { + "x": 1989.185022672821, + "y": 1327.6617206694202 + }, + "data": { + "node": { + "template": { + "_type": "Component", + "background_color": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "background_color", + "value": "", + "display_name": "Background Color", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The background color of the icon.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "chat_icon": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "chat_icon", + "value": "", + "display_name": "Icon", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The icon of the message.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "from langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import DropdownInput, MessageInput, MessageTextInput, Output\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n MessageInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Message to be passed as output.\",\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n 
value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n source_dict[\"source\"] = source\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n message = self.input_value if isinstance(self.input_value, Message) else Message(text=self.input_value)\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", + "advanced": true, + "dynamic": true, + "info": "", + "load_from_db": false, + "title_case": false + }, + "data_template": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "data_template", + "value": "{text}", + "display_name": "Data Template", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "input_value": { + "trace_as_input": true, + "tool_mode": false, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "input_value", + "value": "", + "display_name": "Text", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "Message to be passed as output.", + "title_case": false, + "type": "str", + "_input_type": "MessageInput" + }, + "sender": { + "tool_mode": false, + "trace_as_metadata": true, + "options": [ + "Machine", + "User" + ], + "combobox": false, + "required": false, + "placeholder": "", + "show": true, + "name": "sender", + "value": "Machine", + "display_name": "Sender Type", + "advanced": true, + "dynamic": false, + "info": "Type of sender.", + "title_case": false, + "type": "str", + "_input_type": "DropdownInput" + }, + "sender_name": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "sender_name", + "value": "AI", + "display_name": "Sender Name", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "Name of the sender.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "session_id": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "session_id", + "value": "", + "display_name": "Session ID", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The session ID of the chat. 
If empty, the current session ID parameter will be used.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "should_store_message": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "should_store_message", + "value": true, + "display_name": "Store Messages", + "advanced": true, + "dynamic": false, + "info": "Store the message in the history.", + "title_case": false, + "type": "bool", + "_input_type": "BoolInput" + }, + "text_color": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "required": false, + "placeholder": "", + "show": true, + "name": "text_color", + "value": "", + "display_name": "Text Color", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The text color of the name", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + } + }, + "description": "Display a chat message in the Playground.", + "icon": "MessagesSquare", + "base_classes": [ + "Message" + ], + "display_name": "Chat Output", + "documentation": "", + "minimized": true, + "custom_fields": {}, + "output_types": [], + "pinned": false, + "conditional_paths": [], + "frozen": false, + "outputs": [ + { + "types": [ + "Message" + ], + "selected": "Message", + "name": "message", + "display_name": "Message", + "method": "message_response", + "value": "__UNDEFINED__", + "cache": true + } + ], + "field_order": [ + "input_value", + "should_store_message", + "sender", + "sender_name", + "session_id", + "data_template", + "background_color", + "chat_icon", + "text_color" + ], + "beta": false, + "legacy": false, + "edited": false, + "metadata": {}, + "tool_mode": false, + "category": "outputs", + "key": "ChatOutput", + "score": 0.003169567463043492, + "lf_version": "1.1.1" + }, + "showNode": true, + "type": "ChatOutput", + "id": "ChatOutput-HZiAI" + }, + "selected": true, + "measured": { + "width": 320, + "height": 230 + }, + "dragging": false + }, + { + "id": "LoopComponent-vFBJP", + "type": "genericNode", + "position": { + "x": 1015.8298592103808, + "y": 1230.6435424847218 + }, + "data": { + "node": { + "template": { + "_type": "Component", + "data": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "trace_as_input": true, + "required": false, + "placeholder": "", + "show": true, + "name": "data", + "value": "", + "display_name": "Data", + "advanced": false, + "input_types": [ + "Data" + ], + "dynamic": false, + "info": "The initial list of Data objects to iterate over.", + "title_case": false, + "type": "other", + "_input_type": "DataInput" + }, + "loop_input": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "trace_as_input": true, + "required": false, + "placeholder": "", + "show": true, + "name": "loop_input", + "value": "", + "display_name": "Loop Input", + "advanced": false, + "input_types": [ + "Data" + ], + "dynamic": false, + "info": "Data to aggregate during the iteration.", + "title_case": false, + "type": "other", + "_input_type": "DataInput" + }, + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "from langflow.custom import Component\nfrom langflow.io import DataInput, Output\nfrom langflow.schema import Data\n\n\nclass LoopComponent(Component):\n display_name = \"Loop\"\n 
description = (\n \"Iterates over a list of Data objects, outputting one item at a time and aggregating results from loop inputs.\"\n )\n icon = \"infinity\"\n\n inputs = [\n DataInput(name=\"data\", display_name=\"Data\", info=\"The initial list of Data objects to iterate over.\"),\n DataInput(name=\"loop_input\", display_name=\"Loop Input\", info=\"Data to aggregate during the iteration.\"),\n ]\n\n outputs = [\n Output(display_name=\"Item\", name=\"item\", method=\"item_output\"),\n Output(display_name=\"Done\", name=\"done\", method=\"done_output\"),\n ]\n\n def initialize_data(self) -> None:\n \"\"\"Initialize the data list, context index, and aggregated list.\"\"\"\n if self.ctx.get(f\"{self._id}_initialized\", False):\n return\n\n # Ensure data is a list of Data objects\n data_list = self._validate_data(self.data)\n\n # Store the initial data and context variables\n self.update_ctx(\n {\n f\"{self._id}_data\": data_list,\n f\"{self._id}_index\": 0,\n f\"{self._id}_aggregated\": [],\n f\"{self._id}_initialized\": True,\n }\n )\n\n def _validate_data(self, data):\n \"\"\"Validate and return a list of Data objects.\"\"\"\n if isinstance(data, Data):\n return [data]\n if isinstance(data, list) and all(isinstance(item, Data) for item in data):\n return data\n msg = \"The 'data' input must be a list of Data objects or a single Data object.\"\n raise TypeError(msg)\n\n def evaluate_stop_loop(self) -> bool:\n \"\"\"Evaluate whether to stop item or done output.\"\"\"\n current_index = self.ctx.get(f\"{self._id}_index\", 0)\n data_length = len(self.ctx.get(f\"{self._id}_data\", []))\n return current_index > data_length\n\n def item_output(self) -> Data:\n \"\"\"Output the next item in the list or stop if done.\"\"\"\n self.initialize_data()\n current_item = Data(text=\"\")\n\n if self.evaluate_stop_loop():\n self.stop(\"item\")\n return Data(text=\"\")\n\n # Get data list and current index\n data_list, current_index = self.loop_variables()\n if current_index < len(data_list):\n # Output current item and increment index\n try:\n current_item = data_list[current_index]\n except IndexError:\n current_item = Data(text=\"\")\n self.aggregated_output()\n self.update_ctx({f\"{self._id}_index\": current_index + 1})\n return current_item\n\n def done_output(self) -> Data:\n \"\"\"Trigger the done output when iteration is complete.\"\"\"\n self.initialize_data()\n\n if self.evaluate_stop_loop():\n self.stop(\"item\")\n self.start(\"done\")\n\n return self.ctx.get(f\"{self._id}_aggregated\", [])\n self.stop(\"done\")\n return Data(text=\"\")\n\n def loop_variables(self):\n \"\"\"Retrieve loop variables from context.\"\"\"\n return (\n self.ctx.get(f\"{self._id}_data\", []),\n self.ctx.get(f\"{self._id}_index\", 0),\n )\n\n def aggregated_output(self) -> Data:\n \"\"\"Return the aggregated list once all items are processed.\"\"\"\n self.initialize_data()\n\n # Get data list and aggregated list\n data_list = self.ctx.get(f\"{self._id}_data\", [])\n aggregated = self.ctx.get(f\"{self._id}_aggregated\", [])\n\n # Check if loop input is provided and append to aggregated list\n if self.loop_input is not None and not isinstance(self.loop_input, str) and len(aggregated) <= len(data_list):\n aggregated.append(self.loop_input)\n self.update_ctx({f\"{self._id}_aggregated\": aggregated})\n\n return aggregated\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", + "advanced": true, + "dynamic": true, + "info": "", + "load_from_db": false, + "title_case": false + } + }, + "description": 
"Iterates over a list of Data objects, outputting one item at a time and aggregating results from loop inputs.", + "icon": "infinity", + "base_classes": [ + "Data" + ], + "display_name": "Loop", + "documentation": "", + "minimized": false, + "custom_fields": {}, + "output_types": [], + "pinned": false, + "conditional_paths": [], + "frozen": false, + "outputs": [ + { + "types": [ + "Data" + ], + "selected": "Data", + "name": "item", + "display_name": "Item", + "method": "item_output", + "value": "__UNDEFINED__", + "cache": true + }, + { + "types": [ + "Data" + ], + "selected": "Data", + "name": "done", + "display_name": "Done", + "method": "done_output", + "value": "__UNDEFINED__", + "cache": true + } + ], + "field_order": [ + "data", + "loop_input" + ], + "beta": false, + "legacy": false, + "edited": false, + "metadata": {}, + "tool_mode": false, + "lf_version": "1.1.1" + }, + "showNode": true, + "type": "LoopComponent", + "id": "LoopComponent-vFBJP", + "description": "Iterates over a list of Data objects, outputting one item at a time and aggregating results from loop inputs.", + "display_name": "Loop" + }, + "selected": false, + "measured": { + "width": 320, + "height": 324 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "ChatInput-dOD8A", + "sourceHandle": "{œdataTypeœ:œChatInputœ,œidœ:œChatInput-dOD8Aœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}", + "target": "MessagetoData-dHnzn", + "targetHandle": "{œfieldNameœ:œmessageœ,œidœ:œMessagetoData-dHnznœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "data": { + "targetHandle": { + "fieldName": "message", + "id": "MessagetoData-dHnzn", + "inputTypes": [ + "Message" + ], + "type": "str" + }, + "sourceHandle": { + "dataType": "ChatInput", + "id": "ChatInput-dOD8A", + "name": "message", + "output_types": [ + "Message" + ] + } + }, + "id": "reactflow__edge-ChatInput-dOD8A{œdataTypeœ:œChatInputœ,œidœ:œChatInput-dOD8Aœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}-MessagetoData-dHnzn{œfieldNameœ:œmessageœ,œidœ:œMessagetoData-dHnznœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "className": "", + "animated": false, + "selected": false + }, + { + "source": "MessagetoData-dHnzn", + "sourceHandle": "{œdataTypeœ:œMessagetoDataœ,œidœ:œMessagetoData-dHnznœ,œnameœ:œdataœ,œoutput_typesœ:[œDataœ]}", + "target": "SplitText-d7abl", + "targetHandle": "{œfieldNameœ:œdata_inputsœ,œidœ:œSplitText-d7ablœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", + "data": { + "targetHandle": { + "fieldName": "data_inputs", + "id": "SplitText-d7abl", + "inputTypes": [ + "Data" + ], + "type": "other" + }, + "sourceHandle": { + "dataType": "MessagetoData", + "id": "MessagetoData-dHnzn", + "name": "data", + "output_types": [ + "Data" + ] + } + }, + "id": "xy-edge__MessagetoData-dHnzn{œdataTypeœ:œMessagetoDataœ,œidœ:œMessagetoData-dHnznœ,œnameœ:œdataœ,œoutput_typesœ:[œDataœ]}-SplitText-d7abl{œfieldNameœ:œdata_inputsœ,œidœ:œSplitText-d7ablœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", + "animated": false, + "className": "" + }, + { + "source": "ParseData-HI264", + "sourceHandle": "{œdataTypeœ:œParseDataœ,œidœ:œParseData-HI264œ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", + "target": "MessagetoData-02q2m", + "targetHandle": "{œfieldNameœ:œmessageœ,œidœ:œMessagetoData-02q2mœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "data": { + "targetHandle": { + "fieldName": "message", + "id": "MessagetoData-02q2m", + "inputTypes": [ + "Message" + ], + "type": "str" + }, + "sourceHandle": { + "dataType": "ParseData", + "id": "ParseData-HI264", + "name": "text", + "output_types": [ + "Message" + ] + } 
+ }, + "id": "xy-edge__ParseData-HI264{œdataTypeœ:œParseDataœ,œidœ:œParseData-HI264œ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-MessagetoData-02q2m{œfieldNameœ:œmessageœ,œidœ:œMessagetoData-02q2mœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "animated": false, + "className": "" + }, + { + "source": "ParseData-Jsbbl", + "sourceHandle": "{œdataTypeœ:œParseDataœ,œidœ:œParseData-Jsbblœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", + "target": "ChatOutput-HZiAI", + "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-HZiAIœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "data": { + "targetHandle": { + "fieldName": "input_value", + "id": "ChatOutput-HZiAI", + "inputTypes": [ + "Message" + ], + "type": "str" + }, + "sourceHandle": { + "dataType": "ParseData", + "id": "ParseData-Jsbbl", + "name": "text", + "output_types": [ + "Message" + ] + } + }, + "id": "xy-edge__ParseData-Jsbbl{œdataTypeœ:œParseDataœ,œidœ:œParseData-Jsbblœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-HZiAI{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-HZiAIœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "animated": false, + "className": "" + }, + { + "source": "SplitText-d7abl", + "sourceHandle": "{œdataTypeœ:œSplitTextœ,œidœ:œSplitText-d7ablœ,œnameœ:œchunksœ,œoutput_typesœ:[œDataœ]}", + "target": "LoopComponent-vFBJP", + "targetHandle": "{œfieldNameœ:œdataœ,œidœ:œLoopComponent-vFBJPœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", + "data": { + "targetHandle": { + "fieldName": "data", + "id": "LoopComponent-vFBJP", + "inputTypes": [ + "Data" + ], + "type": "other" + }, + "sourceHandle": { + "dataType": "SplitText", + "id": "SplitText-d7abl", + "name": "chunks", + "output_types": [ + "Data" + ] + } + }, + "id": "xy-edge__SplitText-d7abl{œdataTypeœ:œSplitTextœ,œidœ:œSplitText-d7ablœ,œnameœ:œchunksœ,œoutput_typesœ:[œDataœ]}-LoopComponent-vFBJP{œfieldNameœ:œdataœ,œidœ:œLoopComponent-vFBJPœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", + "animated": false, + "className": "" + }, + { + "source": "MessagetoData-02q2m", + "sourceHandle": "{œdataTypeœ:œMessagetoDataœ,œidœ:œMessagetoData-02q2mœ,œnameœ:œdataœ,œoutput_typesœ:[œDataœ]}", + "target": "LoopComponent-vFBJP", + "targetHandle": "{œfieldNameœ:œloop_inputœ,œidœ:œLoopComponent-vFBJPœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", + "data": { + "targetHandle": { + "fieldName": "loop_input", + "id": "LoopComponent-vFBJP", + "inputTypes": [ + "Data" + ], + "type": "other" + }, + "sourceHandle": { + "dataType": "MessagetoData", + "id": "MessagetoData-02q2m", + "name": "data", + "output_types": [ + "Data" + ] + } + }, + "id": "xy-edge__MessagetoData-02q2m{œdataTypeœ:œMessagetoDataœ,œidœ:œMessagetoData-02q2mœ,œnameœ:œdataœ,œoutput_typesœ:[œDataœ]}-LoopComponent-vFBJP{œfieldNameœ:œloop_inputœ,œidœ:œLoopComponent-vFBJPœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", + "animated": false, + "className": "" + }, + { + "source": "LoopComponent-vFBJP", + "sourceHandle": "{œdataTypeœ:œLoopComponentœ,œidœ:œLoopComponent-vFBJPœ,œnameœ:œitemœ,œoutput_typesœ:[œDataœ]}", + "target": "ParseData-HI264", + "targetHandle": "{œfieldNameœ:œdataœ,œidœ:œParseData-HI264œ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", + "data": { + "targetHandle": { + "fieldName": "data", + "id": "ParseData-HI264", + "inputTypes": [ + "Data" + ], + "type": "other" + }, + "sourceHandle": { + "dataType": "LoopComponent", + "id": "LoopComponent-vFBJP", + "name": "item", + "output_types": [ + "Data" + ] + } + }, + "id": 
"xy-edge__LoopComponent-vFBJP{œdataTypeœ:œLoopComponentœ,œidœ:œLoopComponent-vFBJPœ,œnameœ:œitemœ,œoutput_typesœ:[œDataœ]}-ParseData-HI264{œfieldNameœ:œdataœ,œidœ:œParseData-HI264œ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", + "animated": false, + "className": "" + }, + { + "source": "LoopComponent-vFBJP", + "sourceHandle": "{œdataTypeœ:œLoopComponentœ,œidœ:œLoopComponent-vFBJPœ,œnameœ:œdoneœ,œoutput_typesœ:[œDataœ]}", + "target": "ParseData-Jsbbl", + "targetHandle": "{œfieldNameœ:œdataœ,œidœ:œParseData-Jsbblœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", + "data": { + "targetHandle": { + "fieldName": "data", + "id": "ParseData-Jsbbl", + "inputTypes": [ + "Data" + ], + "type": "other" + }, + "sourceHandle": { + "dataType": "LoopComponent", + "id": "LoopComponent-vFBJP", + "name": "done", + "output_types": [ + "Data" + ] + } + }, + "id": "xy-edge__LoopComponent-vFBJP{œdataTypeœ:œLoopComponentœ,œidœ:œLoopComponent-vFBJPœ,œnameœ:œdoneœ,œoutput_typesœ:[œDataœ]}-ParseData-Jsbbl{œfieldNameœ:œdataœ,œidœ:œParseData-Jsbblœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", + "animated": false, + "className": "" + } + ], + "viewport": { + "x": 398.35329389327603, + "y": 66.60635240558531, + "zoom": 0.4260501062620726 + } + }, + "description": "test loop", + "name": "LoopTest", + "last_tested_version": "1.1.1", + "endpoint_name": null, + "is_component": false +} \ No newline at end of file diff --git a/src/backend/tests/unit/components/logic/__init__.py b/src/backend/tests/unit/components/logic/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/src/backend/tests/unit/components/logic/test_loop.py b/src/backend/tests/unit/components/logic/test_loop.py new file mode 100644 index 000000000000..8cc6ca9d058c --- /dev/null +++ b/src/backend/tests/unit/components/logic/test_loop.py @@ -0,0 +1,90 @@ +from uuid import UUID + +import pytest +from httpx import AsyncClient +from langflow.components.logic.loop import LoopComponent +from langflow.memory import aget_messages +from langflow.schema.data import Data +from langflow.services.database.models.flow import FlowCreate +from orjson import orjson + +from tests.base import ComponentTestBaseWithClient + +TEXT = ( + "lorem ipsum dolor sit amet lorem ipsum dolor sit amet lorem ipsum dolor sit amet. " + "lorem ipsum dolor sit amet lorem ipsum dolor sit amet lorem ipsum dolor sit amet. " + "lorem ipsum dolor sit amet lorem ipsum dolor sit amet lorem ipsum dolor sit amet." +) + + +class TestLoopComponentWithAPI(ComponentTestBaseWithClient): + @pytest.fixture + def component_class(self): + """Return the component class to test.""" + return LoopComponent + + @pytest.fixture + def file_names_mapping(self): + """Return an empty list since this component doesn't have version-specific files.""" + return [] + + @pytest.fixture + def default_kwargs(self): + """Return the default kwargs for the component.""" + return { + "data": [[Data(text="Hello World")]], + "loop_input": [Data(text=TEXT)], + } + + def test_latest_version(self, default_kwargs) -> None: + """Test that the component works with the latest version.""" + result = LoopComponent(**default_kwargs) + assert result is not None, "Component returned None for the latest version." 
+ + async def _create_flow(self, client, json_loop_test, logged_in_headers): + vector_store = orjson.loads(json_loop_test) + data = vector_store["data"] + vector_store = FlowCreate(name="Flow", description="description", data=data, endpoint_name="f") + response = await client.post("api/v1/flows/", json=vector_store.model_dump(), headers=logged_in_headers) + response.raise_for_status() + return response.json()["id"] + + async def check_messages(self, flow_id): + messages = await aget_messages(flow_id=UUID(flow_id), order="ASC") + assert len(messages) == 2 + assert messages[0].session_id == flow_id + assert messages[0].sender == "User" + assert messages[0].sender_name == "User" + assert messages[0].text != "" + assert messages[1].session_id == flow_id + assert messages[1].sender == "Machine" + assert messages[1].sender_name == "AI" + assert len(messages[1].text) > 0 + + async def test_build_flow_loop(self, client, json_loop_test, logged_in_headers): + flow_id = await self._create_flow(client, json_loop_test, logged_in_headers) + + async with client.stream("POST", f"api/v1/build/{flow_id}/flow", json={}, headers=logged_in_headers) as r: + async for line in r.aiter_lines(): + # httpx split by \n, but ndjson sends two \n for each line + if line: + # Process the line if needed + pass + + await self.check_messages(flow_id) + + async def test_run_flow_loop(self, client: AsyncClient, created_api_key, json_loop_test, logged_in_headers): + flow_id = await self._create_flow(client, json_loop_test, logged_in_headers) + headers = {"x-api-key": created_api_key.api_key} + payload = { + "input_value": TEXT, + "input_type": "chat", + "session_id": f"{flow_id}run", + "output_type": "chat", + "tweaks": {}, + } + response = await client.post(f"/api/v1/run/{flow_id}", json=payload, headers=headers) + data = response.json() + assert "outputs" in data + assert "session_id" in data + assert len(data["outputs"][-1]["outputs"]) > 0 From 303779e40aafbbd078b8d68d7e736567c10528a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vin=C3=ADcios=20Batista=20da=20Silva?= Date: Thu, 16 Jan 2025 15:37:14 -0300 Subject: [PATCH 09/22] feat: improve model input fields for Cohere component (#5712) feat: improve model input fields for cohere component 1. Make api_key field required 2. Convert temperature to SliderInput with range 0-2 3. Add info description to temperature slider --- .../base/langflow/components/models/cohere.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/backend/base/langflow/components/models/cohere.py b/src/backend/base/langflow/components/models/cohere.py index e1957d24dc77..22c43b373772 100644 --- a/src/backend/base/langflow/components/models/cohere.py +++ b/src/backend/base/langflow/components/models/cohere.py @@ -3,7 +3,8 @@ from langflow.base.models.model import LCModelComponent from langflow.field_typing import LanguageModel -from langflow.io import FloatInput, SecretStrInput +from langflow.field_typing.range_spec import RangeSpec +from langflow.io import SecretStrInput, SliderInput class CohereComponent(LCModelComponent): @@ -21,8 +22,15 @@ class CohereComponent(LCModelComponent): info="The Cohere API Key to use for the Cohere model.", advanced=False, value="COHERE_API_KEY", + required=True, + ), + SliderInput( + name="temperature", + display_name="Temperature", + value=0.75, + range_spec=RangeSpec(min=0, max=2, step=0.01), + info="Controls randomness. 
Lower values are more deterministic, higher values are more creative.", ), - FloatInput(name="temperature", display_name="Temperature", value=0.75), ] def build_model(self) -> LanguageModel: # type: ignore[type-var] From b55c9f16987934ddb21b303031e0caa78d171a17 Mon Sep 17 00:00:00 2001 From: Raphael Valdetaro <79842132+raphaelchristi@users.noreply.github.com> Date: Thu, 16 Jan 2025 15:37:18 -0300 Subject: [PATCH 10/22] refactor: improve naming consistency in DataCombiner component (#5471) * refactor: improve naming consistency in DataCombiner component - Rename MergeOperation to DataOperation - Rename component to DataCombinerComponent - Convert operation enum values to uppercase - Update method names for consistency * [autofix.ci] apply automated fixes * fix: resolved linting errors in __init__.py * [autofix.ci] apply automated fixes * Changed operation names to capitalize only first letter * refactor: rename DataCombinerComponent to MergeDataComponent for better clarity and backwards compatibility * [autofix.ci] apply automated fixes * fix: Translate Portuguese text to English in merge_data.py * feat: add required to data_inputs in MergeDataComponent --------- Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> Co-authored-by: Edwin Jose --- .../components/processing/merge_data.py | 48 +++++++++---------- 1 file changed, 23 insertions(+), 25 deletions(-) diff --git a/src/backend/base/langflow/components/processing/merge_data.py b/src/backend/base/langflow/components/processing/merge_data.py index bfefe19e2758..31d338c35b8c 100644 --- a/src/backend/base/langflow/components/processing/merge_data.py +++ b/src/backend/base/langflow/components/processing/merge_data.py @@ -8,50 +8,48 @@ from langflow.schema import DataFrame -class MergeOperation(str, Enum): - CONCATENATE = "concatenate" - APPEND = "append" - MERGE = "merge" - JOIN = "join" +class DataOperation(str, Enum): + CONCATENATE = "Concatenate" + APPEND = "Append" + MERGE = "Merge" + JOIN = "Join" class MergeDataComponent(Component): - display_name = "Merge Data" - description = "Combines data using merge operations" + display_name = "Data Combiner" + description = "Combines data using different operations" icon = "merge" - MIN_INPUTS_REQUIRED = 2 inputs = [ - DataInput(name="data_inputs", display_name="Data Inputs", info="Dados para combinar", is_list=True), + DataInput(name="data_inputs", display_name="Data Inputs", info="Data to combine", is_list=True, required=True), DropdownInput( name="operation", - display_name="Merge Operation", - options=[op.value for op in MergeOperation], - value=MergeOperation.CONCATENATE.value, + display_name="Operation Type", + options=[op.value for op in DataOperation], + value=DataOperation.CONCATENATE.value, ), ] + outputs = [Output(display_name="DataFrame", name="combined_data", method="combine_data")] - outputs = [Output(display_name="DataFrame", name="merged_data", method="merge_data")] - - def merge_data(self) -> DataFrame: + def combine_data(self) -> DataFrame: if not self.data_inputs or len(self.data_inputs) < self.MIN_INPUTS_REQUIRED: empty_dataframe = DataFrame() self.status = empty_dataframe return empty_dataframe - operation = MergeOperation(self.operation) + operation = DataOperation(self.operation) try: - merged_dataframe = self._process_operation(operation) - self.status = merged_dataframe + combined_dataframe = self._process_operation(operation) + self.status = combined_dataframe except Exception as e: - logger.error(f"Erro durante operação {operation}: 
{e!s}") + logger.error(f"Error during operation {operation}: {e!s}") raise else: - return merged_dataframe + return combined_dataframe - def _process_operation(self, operation: MergeOperation) -> DataFrame: - if operation == MergeOperation.CONCATENATE: + def _process_operation(self, operation: DataOperation) -> DataFrame: + if operation == DataOperation.CONCATENATE: combined_data: dict[str, str | object] = {} for data_input in self.data_inputs: for key, value in data_input.data.items(): @@ -64,11 +62,11 @@ def _process_operation(self, operation: MergeOperation) -> DataFrame: combined_data[key] = value return DataFrame([combined_data]) - if operation == MergeOperation.APPEND: + if operation == DataOperation.APPEND: rows = [data_input.data for data_input in self.data_inputs] return DataFrame(rows) - if operation == MergeOperation.MERGE: + if operation == DataOperation.MERGE: result_data: dict[str, str | list[str] | object] = {} for data_input in self.data_inputs: for key, value in data_input.data.items(): @@ -81,7 +79,7 @@ def _process_operation(self, operation: MergeOperation) -> DataFrame: result_data[key] = value return DataFrame([result_data]) - if operation == MergeOperation.JOIN: + if operation == DataOperation.JOIN: combined_data = {} for idx, data_input in enumerate(self.data_inputs, 1): for key, value in data_input.data.items(): From d89f8e8e186b8e47c0d494d68e4c1ed696b63119 Mon Sep 17 00:00:00 2001 From: Raphael Valdetaro <79842132+raphaelchristi@users.noreply.github.com> Date: Thu, 16 Jan 2025 15:37:26 -0300 Subject: [PATCH 11/22] refactor: Refactor Wikipedia API component (#5432) * refactor(wikipedia): Refactor Wikipedia API component * test: add unit tests for WikipediaAPIComponent * [autofix.ci] apply automated fixes * refactor: improve WikipediaAPIComponent tests and fix lint issues * [autofix.ci] apply automated fixes * fix: resolve lint issues in WikipediaAPIComponent tests --------- Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> Co-authored-by: Edwin Jose --- .../components/tools/wikipedia_api.py | 28 +++--- .../components/tools/test_wikipedia_api.py | 85 +++++++++++++++++++ 2 files changed, 103 insertions(+), 10 deletions(-) create mode 100644 src/backend/tests/unit/components/tools/test_wikipedia_api.py diff --git a/src/backend/base/langflow/components/tools/wikipedia_api.py b/src/backend/base/langflow/components/tools/wikipedia_api.py index 623378d24934..32adaa6b9e07 100644 --- a/src/backend/base/langflow/components/tools/wikipedia_api.py +++ b/src/backend/base/langflow/components/tools/wikipedia_api.py @@ -1,15 +1,13 @@ -from typing import cast - -from langchain_community.tools import WikipediaQueryRun from langchain_community.utilities.wikipedia import WikipediaAPIWrapper -from langflow.base.langchain_utilities.model import LCToolComponent -from langflow.field_typing import Tool +from langflow.custom import Component from langflow.inputs import BoolInput, IntInput, MessageTextInput, MultilineInput +from langflow.io import Output from langflow.schema import Data +from langflow.schema.message import Message -class WikipediaAPIComponent(LCToolComponent): +class WikipediaAPIComponent(Component): display_name = "Wikipedia API" description = "Call Wikipedia API." 
name = "WikipediaAPI" @@ -19,6 +17,7 @@ class WikipediaAPIComponent(LCToolComponent): MultilineInput( name="input_value", display_name="Input", + tool_mode=True, ), MessageTextInput(name="lang", display_name="Language", value="en"), IntInput(name="k", display_name="Number of results", value=4, required=True), @@ -28,16 +27,25 @@ class WikipediaAPIComponent(LCToolComponent): ), ] - def run_model(self) -> list[Data]: + outputs = [ + Output(display_name="Data", name="data", method="fetch_content"), + Output(display_name="Text", name="text", method="fetch_content_text"), + ] + + def fetch_content(self) -> list[Data]: wrapper = self._build_wrapper() docs = wrapper.load(self.input_value) data = [Data.from_document(doc) for doc in docs] self.status = data return data - def build_tool(self) -> Tool: - wrapper = self._build_wrapper() - return cast("Tool", WikipediaQueryRun(api_wrapper=wrapper)) + def fetch_content_text(self) -> Message: + data = self.fetch_content() + result_string = "" + for item in data: + result_string += item.text + "\n" + self.status = result_string + return Message(text=result_string) def _build_wrapper(self) -> WikipediaAPIWrapper: return WikipediaAPIWrapper( diff --git a/src/backend/tests/unit/components/tools/test_wikipedia_api.py b/src/backend/tests/unit/components/tools/test_wikipedia_api.py new file mode 100644 index 000000000000..fc331caba2f7 --- /dev/null +++ b/src/backend/tests/unit/components/tools/test_wikipedia_api.py @@ -0,0 +1,85 @@ +from unittest.mock import MagicMock + +import pytest +from langflow.components.tools import WikipediaAPIComponent +from langflow.custom import Component +from langflow.custom.utils import build_custom_component_template +from langflow.schema import Data +from langflow.schema.message import Message + + +def test_wikipedia_initialization(): + component = WikipediaAPIComponent() + assert component.display_name == "Wikipedia API" + assert component.description == "Call Wikipedia API." 
+ assert component.icon == "Wikipedia" + + +def test_wikipedia_template(): + wikipedia = WikipediaAPIComponent() + component = Component(_code=wikipedia._code) + frontend_node, _ = build_custom_component_template(component) + + # Verify basic structure + assert isinstance(frontend_node, dict) + + # Verify inputs + assert "template" in frontend_node + input_names = [input_["name"] for input_ in frontend_node["template"].values() if isinstance(input_, dict)] + + expected_inputs = ["input_value", "lang", "k", "load_all_available_meta", "doc_content_chars_max"] + + for input_name in expected_inputs: + assert input_name in input_names + + +@pytest.fixture +def mock_wikipedia_wrapper(mocker): + return mocker.patch("langchain_community.utilities.wikipedia.WikipediaAPIWrapper") + + +def test_fetch_content(mock_wikipedia_wrapper): + component = WikipediaAPIComponent() + component.input_value = "test query" + component.k = 3 + component.lang = "en" + + # Mock the WikipediaAPIWrapper and its load method + mock_instance = MagicMock() + mock_wikipedia_wrapper.return_value = mock_instance + mock_doc = MagicMock() + mock_doc.page_content = "Test content" + mock_doc.metadata = {"source": "wikipedia", "title": "Test Page"} + mock_instance.load.return_value = [mock_doc] + + # Mock the _build_wrapper method to return our mock instance + component._build_wrapper = MagicMock(return_value=mock_instance) + + result = component.fetch_content() + + # Verify wrapper was built with correct params + component._build_wrapper.assert_called_once() + mock_instance.load.assert_called_once_with("test query") + assert isinstance(result, list) + assert len(result) == 1 + assert result[0].text == "Test content" + + +def test_fetch_content_text(): + component = WikipediaAPIComponent() + component.fetch_content = MagicMock(return_value=[Data(text="First result"), Data(text="Second result")]) + + result = component.fetch_content_text() + + assert isinstance(result, Message) + assert result.text == "First result\nSecond result\n" + + +def test_wikipedia_error_handling(): + component = WikipediaAPIComponent() + + # Mock _build_wrapper to raise exception + component._build_wrapper = MagicMock(side_effect=Exception("API Error")) + + with pytest.raises(Exception, match="API Error"): + component.fetch_content() From 90f570edd4803347c80af1f542212f037e899a36 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 16 Jan 2025 11:06:14 -0800 Subject: [PATCH 12/22] chore: update test durations (#5736) Co-authored-by: ogabrielluiz <24829397+ogabrielluiz@users.noreply.github.com> --- src/backend/tests/.test_durations | 1942 ++++++++++++++--------------- 1 file changed, 971 insertions(+), 971 deletions(-) diff --git a/src/backend/tests/.test_durations b/src/backend/tests/.test_durations index d1324b685a37..15d18bc90bcd 100644 --- a/src/backend/tests/.test_durations +++ b/src/backend/tests/.test_durations @@ -67,164 +67,164 @@ "src/backend/tests/test_webhook.py::test_webhook_endpoint": 8.848518459000388, "src/backend/tests/test_webhook.py::test_webhook_flow_on_run_endpoint": 4.675444458000584, "src/backend/tests/test_webhook.py::test_webhook_with_random_payload": 5.161753501000476, - "src/backend/tests/unit/api/test_api_utils.py::test_get_outdated_components": 0.001822874037316069, - "src/backend/tests/unit/api/test_api_utils.py::test_get_suggestion_message": 0.0022302090073935688, - "src/backend/tests/unit/api/v1/test_api_key.py::test_create_api_key_route": 2.0688239169539884, - 
"src/backend/tests/unit/api/v1/test_api_key.py::test_create_folder": 62.357504083018284, - "src/backend/tests/unit/api/v1/test_api_key.py::test_delete_api_key_route": 1.980057917011436, - "src/backend/tests/unit/api/v1/test_api_key.py::test_save_store_api_key": 1.8916740409622435, - "src/backend/tests/unit/api/v1/test_endpoints.py::test_get_config": 1.2798670000047423, - "src/backend/tests/unit/api/v1/test_endpoints.py::test_get_version": 1.4769667500222567, - "src/backend/tests/unit/api/v1/test_endpoints.py::test_update_component_outputs": 2.179903791024117, - "src/backend/tests/unit/api/v1/test_files.py::test_delete_file": 2.584503874008078, - "src/backend/tests/unit/api/v1/test_files.py::test_download_file": 1.9315963339759037, - "src/backend/tests/unit/api/v1/test_files.py::test_file_operations": 3.4085357090225443, - "src/backend/tests/unit/api/v1/test_files.py::test_list_files": 2.386070208012825, - "src/backend/tests/unit/api/v1/test_files.py::test_upload_file": 2.066325791005511, - "src/backend/tests/unit/api/v1/test_files.py::test_upload_file_size_limit": 2.1683006249950267, - "src/backend/tests/unit/api/v1/test_flows.py::test_create_flow": 2.023947125970153, - "src/backend/tests/unit/api/v1/test_flows.py::test_create_flows": 2.15371391698136, - "src/backend/tests/unit/api/v1/test_flows.py::test_read_basic_examples": 2.245407290989533, - "src/backend/tests/unit/api/v1/test_flows.py::test_read_flow": 62.12677004100988, - "src/backend/tests/unit/api/v1/test_flows.py::test_read_flows": 1.9904386250127573, - "src/backend/tests/unit/api/v1/test_flows.py::test_update_flow": 2.0406544170109555, - "src/backend/tests/unit/api/v1/test_folders.py::test_create_folder": 1.847301583038643, - "src/backend/tests/unit/api/v1/test_folders.py::test_read_folder": 2.355843333003577, - "src/backend/tests/unit/api/v1/test_folders.py::test_read_folders": 1.9612517499772366, - "src/backend/tests/unit/api/v1/test_folders.py::test_update_folder": 2.0699339150160085, - "src/backend/tests/unit/api/v1/test_starter_projects.py::test_get_starter_projects": 5.666953082050895, - "src/backend/tests/unit/api/v1/test_store.py::test_check_if_store_is_enabled": 2.435283333004918, - "src/backend/tests/unit/api/v1/test_users.py::test_add_user": 1.8681874999892898, - "src/backend/tests/unit/api/v1/test_users.py::test_delete_user": 1.761451958969701, - "src/backend/tests/unit/api/v1/test_users.py::test_patch_user": 2.162207876011962, - "src/backend/tests/unit/api/v1/test_users.py::test_read_all_users": 2.1012162909901235, - "src/backend/tests/unit/api/v1/test_users.py::test_read_current_user": 2.1308808339817915, - "src/backend/tests/unit/api/v1/test_users.py::test_reset_password": 2.362140749988612, - "src/backend/tests/unit/api/v1/test_validate.py::test_post_validate_code": 1.009794666984817, - "src/backend/tests/unit/api/v1/test_validate.py::test_post_validate_prompt": 62.12022974996944, - "src/backend/tests/unit/api/v1/test_variable.py::test_create_variable": 2.0105291240033694, + "src/backend/tests/unit/api/test_api_utils.py::test_get_outdated_components": 0.0017631249999681131, + "src/backend/tests/unit/api/test_api_utils.py::test_get_suggestion_message": 0.0022670849999713028, + "src/backend/tests/unit/api/v1/test_api_key.py::test_create_api_key_route": 1.7481415339999842, + "src/backend/tests/unit/api/v1/test_api_key.py::test_create_folder": 26.41031511899999, + "src/backend/tests/unit/api/v1/test_api_key.py::test_delete_api_key_route": 1.7893139390000101, + 
"src/backend/tests/unit/api/v1/test_api_key.py::test_save_store_api_key": 1.6870583510000188, + "src/backend/tests/unit/api/v1/test_endpoints.py::test_get_config": 1.2298859289999768, + "src/backend/tests/unit/api/v1/test_endpoints.py::test_get_version": 1.7736475330000019, + "src/backend/tests/unit/api/v1/test_endpoints.py::test_update_component_outputs": 1.7560738239999978, + "src/backend/tests/unit/api/v1/test_files.py::test_delete_file": 1.778861317999997, + "src/backend/tests/unit/api/v1/test_files.py::test_download_file": 1.7161842939999872, + "src/backend/tests/unit/api/v1/test_files.py::test_file_operations": 1.8359752190000336, + "src/backend/tests/unit/api/v1/test_files.py::test_list_files": 2.2975250519999975, + "src/backend/tests/unit/api/v1/test_files.py::test_upload_file": 1.6733708309999997, + "src/backend/tests/unit/api/v1/test_files.py::test_upload_file_size_limit": 1.7182308050000188, + "src/backend/tests/unit/api/v1/test_flows.py::test_create_flow": 1.7455541559999972, + "src/backend/tests/unit/api/v1/test_flows.py::test_create_flows": 1.9661456810000288, + "src/backend/tests/unit/api/v1/test_flows.py::test_read_basic_examples": 1.7919677730000103, + "src/backend/tests/unit/api/v1/test_flows.py::test_read_flow": 2.38362393700001, + "src/backend/tests/unit/api/v1/test_flows.py::test_read_flows": 1.7488259079999864, + "src/backend/tests/unit/api/v1/test_flows.py::test_update_flow": 1.7746817880000094, + "src/backend/tests/unit/api/v1/test_folders.py::test_create_folder": 1.778121101000039, + "src/backend/tests/unit/api/v1/test_folders.py::test_read_folder": 1.8005390859999864, + "src/backend/tests/unit/api/v1/test_folders.py::test_read_folders": 1.7822004289999995, + "src/backend/tests/unit/api/v1/test_folders.py::test_update_folder": 2.5431134970000073, + "src/backend/tests/unit/api/v1/test_starter_projects.py::test_get_starter_projects": 2.389354776999994, + "src/backend/tests/unit/api/v1/test_store.py::test_check_if_store_is_enabled": 1.3543506740000169, + "src/backend/tests/unit/api/v1/test_users.py::test_add_user": 1.6322982810000042, + "src/backend/tests/unit/api/v1/test_users.py::test_delete_user": 2.954711787000008, + "src/backend/tests/unit/api/v1/test_users.py::test_patch_user": 2.424689332000014, + "src/backend/tests/unit/api/v1/test_users.py::test_read_all_users": 1.8432600909999906, + "src/backend/tests/unit/api/v1/test_users.py::test_read_current_user": 1.854633927000009, + "src/backend/tests/unit/api/v1/test_users.py::test_reset_password": 2.3468312130000015, + "src/backend/tests/unit/api/v1/test_validate.py::test_post_validate_code": 1.4170512360000203, + "src/backend/tests/unit/api/v1/test_validate.py::test_post_validate_prompt": 1.398274219000001, + "src/backend/tests/unit/api/v1/test_variable.py::test_create_variable": 1.9472019379999779, "src/backend/tests/unit/api/v1/test_variable.py::test_create_variable__Exception": 5.891528583015315, "src/backend/tests/unit/api/v1/test_variable.py::test_create_variable__HTTPException": 2.8841335409670137, - "src/backend/tests/unit/api/v1/test_variable.py::test_create_variable__exception": 2.022962749004364, - "src/backend/tests/unit/api/v1/test_variable.py::test_create_variable__httpexception": 2.2306703330250457, + "src/backend/tests/unit/api/v1/test_variable.py::test_create_variable__exception": 2.0054446849999863, + "src/backend/tests/unit/api/v1/test_variable.py::test_create_variable__httpexception": 2.8701288899999895, 
"src/backend/tests/unit/api/v1/test_variable.py::test_create_variable__variable_name_alread_exists": 3.690157334029209, - "src/backend/tests/unit/api/v1/test_variable.py::test_create_variable__variable_name_already_exists": 2.276717043016106, - "src/backend/tests/unit/api/v1/test_variable.py::test_create_variable__variable_name_and_value_cannot_be_empty": 1.8685768339782953, - "src/backend/tests/unit/api/v1/test_variable.py::test_create_variable__variable_name_cannot_be_empty": 1.7683035000227392, - "src/backend/tests/unit/api/v1/test_variable.py::test_create_variable__variable_value_cannot_be_empty": 1.9370164579886477, - "src/backend/tests/unit/api/v1/test_variable.py::test_delete_variable": 2.4603272920066956, + "src/backend/tests/unit/api/v1/test_variable.py::test_create_variable__variable_name_already_exists": 2.008939483000006, + "src/backend/tests/unit/api/v1/test_variable.py::test_create_variable__variable_name_and_value_cannot_be_empty": 1.9462890819999927, + "src/backend/tests/unit/api/v1/test_variable.py::test_create_variable__variable_name_cannot_be_empty": 1.922864575999995, + "src/backend/tests/unit/api/v1/test_variable.py::test_create_variable__variable_value_cannot_be_empty": 1.9532296369999642, + "src/backend/tests/unit/api/v1/test_variable.py::test_delete_variable": 2.1662082399999747, "src/backend/tests/unit/api/v1/test_variable.py::test_delete_variable__Exception": 3.1565893749939278, - "src/backend/tests/unit/api/v1/test_variable.py::test_delete_variable__exception": 1.6004500830022153, - "src/backend/tests/unit/api/v1/test_variable.py::test_read_variables": 2.2641518330201507, - "src/backend/tests/unit/api/v1/test_variable.py::test_read_variables__": 3.5150550410035066, - "src/backend/tests/unit/api/v1/test_variable.py::test_read_variables__empty": 2.79509312598384, - "src/backend/tests/unit/api/v1/test_variable.py::test_update_variable": 2.470824124000501, + "src/backend/tests/unit/api/v1/test_variable.py::test_delete_variable__exception": 2.081866199999979, + "src/backend/tests/unit/api/v1/test_variable.py::test_read_variables": 2.1389514480000003, + "src/backend/tests/unit/api/v1/test_variable.py::test_read_variables__": 2.0445527869999864, + "src/backend/tests/unit/api/v1/test_variable.py::test_read_variables__empty": 2.1289878469999906, + "src/backend/tests/unit/api/v1/test_variable.py::test_update_variable": 2.1443009239999924, "src/backend/tests/unit/api/v1/test_variable.py::test_update_variable__Exception": 3.202228542009834, - "src/backend/tests/unit/api/v1/test_variable.py::test_update_variable__exception": 2.2278289160167333, - "src/backend/tests/unit/base/load/test_load.py::test_run_flow_from_json_params": 0.000896166980965063, + "src/backend/tests/unit/api/v1/test_variable.py::test_update_variable__exception": 2.105398094999998, + "src/backend/tests/unit/base/load/test_load.py::test_run_flow_from_json_params": 0.001675444999989395, "src/backend/tests/unit/base/models/test_model_constants.py::test_provider_names": 0.024663168034749106, "src/backend/tests/unit/base/tools/test_component_tool.py::test_component_tool": 0.04467487393412739, - "src/backend/tests/unit/base/tools/test_component_toolkit.py::test_component_tool": 0.004842332971747965, - "src/backend/tests/unit/base/tools/test_component_toolkit.py::test_component_tool_with_api_key": 0.00808541601873003, - "src/backend/tests/unit/base/tools/test_toolmodemixin.py::test_component_inputs_toolkit": 0.012256792018888518, - 
"src/backend/tests/unit/components/agents/test_agent_component.py::test_agent_component_with_calculator": 0.6172017919889186, - "src/backend/tests/unit/components/agents/test_agent_events.py::test_chain_end_event": 0.002371458016568795, - "src/backend/tests/unit/components/agents/test_agent_events.py::test_chain_start_event": 0.0033474579686298966, - "src/backend/tests/unit/components/agents/test_agent_events.py::test_chain_stream_event": 0.0023392080038320273, - "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_chain_end_empty_data": 0.0015058319841045886, - "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_chain_end_no_output": 0.0015417499525938183, - "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_chain_end_with_empty_return_values": 0.0015497090062126517, - "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_chain_end_with_output": 0.0017662510217633098, - "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_chain_start_no_input": 0.0019011250114999712, - "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_chain_start_with_input": 0.002642874955199659, - "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_chain_stream_no_output": 0.0018438330153003335, - "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_chain_stream_with_output": 0.0029735419957432896, - "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_tool_end": 0.002772041014395654, - "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_tool_error": 0.00265129201579839, - "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_tool_start": 0.005164123984286562, - "src/backend/tests/unit/components/agents/test_agent_events.py::test_multiple_events": 0.002586001035524532, - "src/backend/tests/unit/components/agents/test_agent_events.py::test_tool_end_event": 0.0024539160367567092, - "src/backend/tests/unit/components/agents/test_agent_events.py::test_tool_error_event": 0.0024248759727925062, - "src/backend/tests/unit/components/agents/test_agent_events.py::test_tool_start_event": 0.002312166994670406, - "src/backend/tests/unit/components/agents/test_agent_events.py::test_unknown_event": 0.0021014170488342643, - "src/backend/tests/unit/components/agents/test_tool_calling_agent.py::test_tool_calling_agent_component": 0.5595056250167545, - "src/backend/tests/unit/components/data/test_api_request_component.py::test_httpx_metadata_behavior[False-expected_properties0]": 0.017366208019666374, - "src/backend/tests/unit/components/data/test_api_request_component.py::test_httpx_metadata_behavior[True-expected_properties1]": 0.01664933399297297, - "src/backend/tests/unit/components/data/test_api_request_component.py::test_parse_curl": 0.0031073760183062404, - "src/backend/tests/unit/components/data/test_api_request_component.py::test_response_info_binary_content": 0.0026153340295422822, - "src/backend/tests/unit/components/data/test_api_request_component.py::test_response_info_default_filename": 0.003500832972349599, - "src/backend/tests/unit/components/data/test_api_request_component.py::test_response_info_filename_from_content_disposition": 0.0031803759920876473, - "src/backend/tests/unit/components/data/test_api_request_component.py::test_response_info_non_binary_content": 0.0024539169971831143, - 
"src/backend/tests/unit/components/data/test_api_request_component.py::test_save_to_file_behavior[False-expected_properties0]": 0.01667708402965218, - "src/backend/tests/unit/components/data/test_api_request_component.py::test_save_to_file_behavior[True-expected_properties1]": 0.019881000014720485, - "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_all_versions_have_a_file_name_defined": 0.0010626679577399045, - "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_component_versions[1.0.19]": 0.3331099589995574, - "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_component_versions[1.1.0]": 0.3345156669965945, - "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_component_versions[1.1.1]": 0.2538998329837341, - "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_as_dataframe": 0.006103375984821469, - "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_component_build_with_multithreading": 0.0036587080103345215, - "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_invalid_type": 0.014712540985783562, - "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_with_depth": 0.009099000017158687, - "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_with_hidden_files": 0.007219542982056737, - "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_with_multithreading": 0.005114582017995417, - "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_with_types[file_types0-1]": 0.0055469590006396174, - "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_with_types[file_types1-1]": 0.004052749980473891, - "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_with_types[file_types2-2]": 0.005200624029384926, - "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_without_mocks": 0.14343770805862732, - "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_latest_version": 0.007099584006937221, - "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_all_versions_have_a_file_name_defined": 0.0014647079806309193, - "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_component_versions[1.0.19]": 1.3189572499832138, - "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_component_versions[1.1.0]": 1.3926032500166912, - "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_component_versions[1.1.1]": 1.2883103740168735, - "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_latest_version": 1.840616917994339, - "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_url_component": 0.002724792022490874, - "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_url_component_as_dataframe": 0.003073999978369102, - 
"src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_url_component_fetch_content_text": 0.0033436240337323397, - "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_url_component_invalid_urls": 0.002873375022318214, - "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_url_component_multiple_urls": 0.005769292009063065, - "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_url_request_success": 0.19926166601362638, - "src/backend/tests/unit/components/git/test_git_component.py::test_check_content_pattern": 0.0037404150061775, - "src/backend/tests/unit/components/git/test_git_component.py::test_check_file_patterns": 0.003351624996867031, - "src/backend/tests/unit/components/git/test_git_component.py::test_combined_filter": 0.003000626020366326, - "src/backend/tests/unit/components/git/test_git_component.py::test_is_binary": 0.003270374989369884, - "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_all_versions_have_a_file_name_defined": 0.0008552499930374324, - "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_batch_run_without_system_message": 0.3232337090012152, - "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_component_versions[1.0.19]": 0.0008978320111054927, - "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_component_versions[1.1.0]": 0.0006737920339219272, - "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_component_versions[1.1.1]": 0.0007478329935111105, - "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_empty_dataframe": 0.039864583988673985, - "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_invalid_column_name": 0.04811716702533886, - "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_latest_version": 0.011959750030655414, - "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_non_string_column_conversion": 0.03566641701036133, - "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_successful_batch_run_with_system_message": 0.02523754199501127, - "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_correctly_builds_output_model": 0.004484751028940082, - "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_empty_output_schema": 0.0017810839926823974, - "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_handles_multiple_outputs": 0.002025083056651056, + "src/backend/tests/unit/base/tools/test_component_toolkit.py::test_component_tool": 0.004177082000012433, + "src/backend/tests/unit/base/tools/test_component_toolkit.py::test_component_tool_with_api_key": 0.005196933000007675, + "src/backend/tests/unit/base/tools/test_toolmodemixin.py::test_component_inputs_toolkit": 0.007465355999983103, + "src/backend/tests/unit/components/agents/test_agent_component.py::test_agent_component_with_calculator": 3.012310665000001, + 
"src/backend/tests/unit/components/agents/test_agent_events.py::test_chain_end_event": 0.002723421000041526, + "src/backend/tests/unit/components/agents/test_agent_events.py::test_chain_start_event": 0.0031313140000008843, + "src/backend/tests/unit/components/agents/test_agent_events.py::test_chain_stream_event": 0.0022288999999773296, + "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_chain_end_empty_data": 0.0014089030000263847, + "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_chain_end_no_output": 0.001377064999957156, + "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_chain_end_with_empty_return_values": 0.001431014999980107, + "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_chain_end_with_output": 0.0016798880000123972, + "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_chain_start_no_input": 0.001403592000002618, + "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_chain_start_with_input": 0.0015551260000279399, + "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_chain_stream_no_output": 0.0014626140000189025, + "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_chain_stream_with_output": 0.0016567039999699773, + "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_tool_end": 0.0014862370000230385, + "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_tool_error": 0.0015396359999897413, + "src/backend/tests/unit/components/agents/test_agent_events.py::test_handle_on_tool_start": 0.0016981630000145742, + "src/backend/tests/unit/components/agents/test_agent_events.py::test_multiple_events": 0.002762253000014425, + "src/backend/tests/unit/components/agents/test_agent_events.py::test_tool_end_event": 0.00250325199996837, + "src/backend/tests/unit/components/agents/test_agent_events.py::test_tool_error_event": 0.0023425520000444067, + "src/backend/tests/unit/components/agents/test_agent_events.py::test_tool_start_event": 0.002664390999967736, + "src/backend/tests/unit/components/agents/test_agent_events.py::test_unknown_event": 0.0022292399999912504, + "src/backend/tests/unit/components/agents/test_tool_calling_agent.py::test_tool_calling_agent_component": 0.07477499900002726, + "src/backend/tests/unit/components/data/test_api_request_component.py::test_httpx_metadata_behavior[False-expected_properties0]": 0.029715499000019463, + "src/backend/tests/unit/components/data/test_api_request_component.py::test_httpx_metadata_behavior[True-expected_properties1]": 0.0291174959999978, + "src/backend/tests/unit/components/data/test_api_request_component.py::test_parse_curl": 0.0034348949999980505, + "src/backend/tests/unit/components/data/test_api_request_component.py::test_response_info_binary_content": 0.0033098929999937354, + "src/backend/tests/unit/components/data/test_api_request_component.py::test_response_info_default_filename": 0.00411513200000968, + "src/backend/tests/unit/components/data/test_api_request_component.py::test_response_info_filename_from_content_disposition": 0.00417946300004246, + "src/backend/tests/unit/components/data/test_api_request_component.py::test_response_info_non_binary_content": 0.0030142819999809944, + "src/backend/tests/unit/components/data/test_api_request_component.py::test_save_to_file_behavior[False-expected_properties0]": 0.028832164000021976, + 
"src/backend/tests/unit/components/data/test_api_request_component.py::test_save_to_file_behavior[True-expected_properties1]": 0.031469264000008934, + "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_all_versions_have_a_file_name_defined": 0.001422869999970544, + "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_component_versions[1.0.19]": 0.09911990899999523, + "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_component_versions[1.1.0]": 0.08119699700000638, + "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_component_versions[1.1.1]": 0.046758023999984744, + "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_as_dataframe": 0.005158827000002475, + "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_component_build_with_multithreading": 0.004377932000039664, + "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_invalid_type": 0.003807018999992806, + "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_with_depth": 0.00484164499997064, + "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_with_hidden_files": 0.004193218000011711, + "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_with_multithreading": 0.004056042999991405, + "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_with_types[file_types0-1]": 0.004084355000031792, + "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_with_types[file_types1-1]": 0.004033810999999332, + "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_with_types[file_types2-2]": 0.004405722999962336, + "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_directory_without_mocks": 0.16762850099999582, + "src/backend/tests/unit/components/data/test_directory_component.py::TestDirectoryComponent::test_latest_version": 0.007856519000000617, + "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_all_versions_have_a_file_name_defined": 0.0010726999999803866, + "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_component_versions[1.0.19]": 0.2798216669999931, + "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_component_versions[1.1.0]": 0.41034814700000766, + "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_component_versions[1.1.1]": 0.33614162500001044, + "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_latest_version": 0.6517368909999846, + "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_url_component": 0.0034908120000238796, + "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_url_component_as_dataframe": 0.003726501000016924, + "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_url_component_fetch_content_text": 0.003086918000008154, + 
"src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_url_component_invalid_urls": 0.0023467379999999594, + "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_url_component_multiple_urls": 0.002856578999995918, + "src/backend/tests/unit/components/data/test_url_component.py::TestURLComponent::test_url_request_success": 0.004725122999985842, + "src/backend/tests/unit/components/git/test_git_component.py::test_check_content_pattern": 0.002916851000037468, + "src/backend/tests/unit/components/git/test_git_component.py::test_check_file_patterns": 0.0026519959999973253, + "src/backend/tests/unit/components/git/test_git_component.py::test_combined_filter": 0.0030961439999828144, + "src/backend/tests/unit/components/git/test_git_component.py::test_is_binary": 0.0028118950000077803, + "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_all_versions_have_a_file_name_defined": 0.0019012260000010883, + "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_batch_run_without_system_message": 0.00541307600002483, + "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_component_versions[1.0.19]": 0.0016880569999955242, + "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_component_versions[1.1.0]": 0.0017891169999870726, + "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_component_versions[1.1.1]": 0.0018206949999637345, + "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_empty_dataframe": 0.00470479500003762, + "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_invalid_column_name": 0.004713061000018115, + "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_latest_version": 0.0269920060000004, + "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_non_string_column_conversion": 0.004753306000026214, + "src/backend/tests/unit/components/helpers/test_batch_run_component.py::TestBatchRunComponent::test_successful_batch_run_with_system_message": 0.006136335999968878, + "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_correctly_builds_output_model": 0.0033656280000116112, + "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_empty_output_schema": 0.002614867000005461, + "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_handles_multiple_outputs": 0.0031433139999705872, "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_invalid_llm_config": 0.42860454198671505, - "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_invalid_output_schema_type": 0.0016448340029455721, - "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_large_input_value": 0.0025345419999212027, - "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_nested_output_schema": 0.0033677919709589332, - 
"src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_raises_value_error_for_unsupported_language_model": 0.0018100419838447124, - "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_successful_structured_output_generation_with_patch_with_config": 0.0028770829958375543, - "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_all_versions_have_a_file_name_defined": 1.3337952079891693, + "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_invalid_output_schema_type": 0.002534156999956849, + "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_large_input_value": 0.003497423000027311, + "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_nested_output_schema": 0.004106398999965677, + "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_raises_value_error_for_unsupported_language_model": 0.0029053389999944557, + "src/backend/tests/unit/components/helpers/test_structured_output_component.py::TestStructuredOutputComponent::test_successful_structured_output_generation_with_patch_with_config": 0.003508593999981713, + "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_all_versions_have_a_file_name_defined": 1.5048135219999494, "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_component_versions[1.0.17]": 4.332370791060384, "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_component_versions[1.0.18]": 3.6762167080305517, - "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_component_versions[1.0.19]": 1.6561730839894153, - "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_component_versions[1.1.0]": 1.9159875430050306, - "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_component_versions[1.1.1]": 1.5799270829884335, - "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_latest_version": 61.19077816599747, - "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_message_response": 60.941896500997245, - "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_message_response_ai_sender": 1.668528290989343, - "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_message_response_with_files": 1.415563334012404, - "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_message_response_without_session": 1.7423896659747697, - "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_message_storage_disabled": 1.4128342079930007, - "src/backend/tests/unit/components/inputs/test_input_components.py::TestTextInputComponent::test_all_versions_have_a_file_name_defined": 0.0006332500197459012, + "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_component_versions[1.0.19]": 1.575966238000035, + "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_component_versions[1.1.0]": 1.550689551000005, + 
"src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_component_versions[1.1.1]": 1.5600006120000103, + "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_latest_version": 0.011442242999976315, + "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_message_response": 1.4917928520000032, + "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_message_response_ai_sender": 2.6729811119999454, + "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_message_response_with_files": 1.5572880579999833, + "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_message_response_without_session": 1.5183210909999616, + "src/backend/tests/unit/components/inputs/test_input_components.py::TestChatInput::test_message_storage_disabled": 1.5422897139999918, + "src/backend/tests/unit/components/inputs/test_input_components.py::TestTextInputComponent::test_all_versions_have_a_file_name_defined": 0.001054327000019839, "src/backend/tests/unit/components/inputs/test_input_components.py::TestTextInputComponent::test_component_versions[1.0.17]": 0.26945149997482076, "src/backend/tests/unit/components/inputs/test_input_components.py::TestTextInputComponent::test_component_versions[1.0.18]": 0.28087970800697803, - "src/backend/tests/unit/components/inputs/test_input_components.py::TestTextInputComponent::test_component_versions[1.0.19]": 0.2654190010216553, - "src/backend/tests/unit/components/inputs/test_input_components.py::TestTextInputComponent::test_component_versions[1.1.0]": 0.26737625000532717, - "src/backend/tests/unit/components/inputs/test_input_components.py::TestTextInputComponent::test_component_versions[1.1.1]": 0.26481549997697584, - "src/backend/tests/unit/components/inputs/test_input_components.py::TestTextInputComponent::test_latest_version": 0.0023170829517766833, + "src/backend/tests/unit/components/inputs/test_input_components.py::TestTextInputComponent::test_component_versions[1.0.19]": 0.07194310000005544, + "src/backend/tests/unit/components/inputs/test_input_components.py::TestTextInputComponent::test_component_versions[1.1.0]": 0.03559453999997686, + "src/backend/tests/unit/components/inputs/test_input_components.py::TestTextInputComponent::test_component_versions[1.1.1]": 0.08170321799997282, + "src/backend/tests/unit/components/inputs/test_input_components.py::TestTextInputComponent::test_latest_version": 0.003234950999967623, "src/backend/tests/unit/components/models/test_ChatOllama_component.py::test_build_model": 0.0020211669616401196, "src/backend/tests/unit/components/models/test_ChatOllama_component.py::test_get_model_failure": 0.0068002091138623655, "src/backend/tests/unit/components/models/test_ChatOllama_component.py::test_get_model_success": 0.015780292043928057, @@ -232,573 +232,573 @@ "src/backend/tests/unit/components/models/test_ChatOllama_component.py::test_update_build_config_mirostat_disabled": 0.0013394170091487467, "src/backend/tests/unit/components/models/test_ChatOllama_component.py::test_update_build_config_mirostat_enabled": 0.0016756660188548267, "src/backend/tests/unit/components/models/test_ChatOllama_component.py::test_update_build_config_model_name": 0.0062951669679023325, - "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_empty_str_endpoint": 0.0006237910129129887, - 
"src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_invalid_endpoint": 0.0006235839973669499, - "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_none_endpoint": 0.0007516249897889793, - "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[AquilaChat-7B]": 0.00040291601908393204, - "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[BLOOMZ-7B]": 0.0005138330161571503, - "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ChatGLM2-6B-32K]": 0.0003988339740317315, - "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[EB-turbo-AppBuilder]": 0.00042070899507962167, - "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE 3.5]": 0.0012772080081049353, - "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE Speed-AppBuilder]": 0.0004613329947460443, - "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE Speed]": 0.0006137500167824328, - "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-3.5-8K]": 0.0007256660028360784, - "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-4.0-8K]": 0.00039341600495390594, - "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-Bot-4]": 0.0004116250202059746, - "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-Bot-turbo-AI]": 0.0004122499958612025, - "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-Bot]": 0.0004147090367041528, - "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-Lite-8K-0308]": 0.0006672909948974848, - "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-Speed-128k]": 0.0004097089695278555, - "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-Speed-8K]": 0.0013652080087922513, - "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-Speed]": 0.0021469590137712657, - "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[Llama-2-13b-chat]": 0.00040550000267103314, - "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[Llama-2-70b-chat]": 0.00040379102574661374, - "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[Llama-2-7b-chat]": 0.0006662499799858779, - "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[Mixtral-8x7B-Instruct]": 0.0013637499941978604, - "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[Qianfan-BLOOMZ-7B-compressed]": 0.0003965840151067823, - "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[Qianfan-Chinese-Llama-2-13B]": 0.00045137599227018654, - "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[Qianfan-Chinese-Llama-2-7B]": 0.00046695800847373903, - 
"src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[XuanYuan-70B-Chat-4bit]": 0.0004297920095268637, - "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[Yi-34B-Chat]": 0.0006268750003073364, - "src/backend/tests/unit/components/models/test_chatollama_component.py::test_build_model": 0.05619949902757071, - "src/backend/tests/unit/components/models/test_chatollama_component.py::test_get_model_failure": 0.020076000975677744, - "src/backend/tests/unit/components/models/test_chatollama_component.py::test_get_model_success": 0.5227191260200925, - "src/backend/tests/unit/components/models/test_chatollama_component.py::test_update_build_config_keep_alive": 0.0026564999716356397, - "src/backend/tests/unit/components/models/test_chatollama_component.py::test_update_build_config_mirostat_disabled": 0.004072416020790115, - "src/backend/tests/unit/components/models/test_chatollama_component.py::test_update_build_config_mirostat_enabled": 0.002698292024433613, - "src/backend/tests/unit/components/models/test_chatollama_component.py::test_update_build_config_model_name": 0.019266917021013796, - "src/backend/tests/unit/components/models/test_huggingface.py::test_huggingface_inputs": 0.0024845840234775096, - "src/backend/tests/unit/components/outputs/test_output_components.py::TestChatOutput::test_all_versions_have_a_file_name_defined": 1.3301894580072258, + "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_empty_str_endpoint": 0.0008305590000077245, + "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_invalid_endpoint": 0.0007957139999348328, + "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_none_endpoint": 0.0012486499999795342, + "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[AquilaChat-7B]": 0.0008217129999934514, + "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[BLOOMZ-7B]": 0.0007758870000316165, + "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ChatGLM2-6B-32K]": 0.0007564910000041891, + "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[EB-turbo-AppBuilder]": 0.0008325129999775527, + "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE 3.5]": 0.0007742649999613604, + "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE Speed-AppBuilder]": 0.000738296999998056, + "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE Speed]": 0.0009922909999886542, + "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-3.5-8K]": 0.0008410890000050131, + "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-4.0-8K]": 0.0007482450000111385, + "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-Bot-4]": 0.0008687310000254911, + "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-Bot-turbo-AI]": 0.0007885110000529494, + "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-Bot]": 0.0007292900000948066, + 
"src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-Lite-8K-0308]": 0.0007915360000652072, + "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-Speed-128k]": 0.0007505799999307783, + "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-Speed-8K]": 0.0007554880000384401, + "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[ERNIE-Speed]": 0.0007956430000035652, + "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[Llama-2-13b-chat]": 0.0007827100000099563, + "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[Llama-2-70b-chat]": 0.0008440940000014052, + "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[Llama-2-7b-chat]": 0.0008126859999606495, + "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[Mixtral-8x7B-Instruct]": 0.0008399260000260256, + "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[Qianfan-BLOOMZ-7B-compressed]": 0.0008165840000060598, + "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[Qianfan-Chinese-Llama-2-13B]": 0.0007709579999755078, + "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[Qianfan-Chinese-Llama-2-7B]": 0.0008596629999715333, + "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[XuanYuan-70B-Chat-4bit]": 0.0008613959999479448, + "src/backend/tests/unit/components/models/test_baidu_qianfan.py::test_qianfan_different_models[Yi-34B-Chat]": 0.0007524140000327861, + "src/backend/tests/unit/components/models/test_chatollama_component.py::test_build_model": 0.08937712900007, + "src/backend/tests/unit/components/models/test_chatollama_component.py::test_get_model_failure": 0.02825947100001258, + "src/backend/tests/unit/components/models/test_chatollama_component.py::test_get_model_success": 0.04064742599996407, + "src/backend/tests/unit/components/models/test_chatollama_component.py::test_update_build_config_keep_alive": 0.0038227300000244213, + "src/backend/tests/unit/components/models/test_chatollama_component.py::test_update_build_config_mirostat_disabled": 0.005271572999959062, + "src/backend/tests/unit/components/models/test_chatollama_component.py::test_update_build_config_mirostat_enabled": 0.0036124290000429937, + "src/backend/tests/unit/components/models/test_chatollama_component.py::test_update_build_config_model_name": 0.02888246300000219, + "src/backend/tests/unit/components/models/test_huggingface.py::test_huggingface_inputs": 0.0028224550000004456, + "src/backend/tests/unit/components/outputs/test_output_components.py::TestChatOutput::test_all_versions_have_a_file_name_defined": 1.5647131269999477, "src/backend/tests/unit/components/outputs/test_output_components.py::TestChatOutput::test_component_versions[1.0.17]": 3.6106157921021804, "src/backend/tests/unit/components/outputs/test_output_components.py::TestChatOutput::test_component_versions[1.0.18]": 3.6919090420706198, - "src/backend/tests/unit/components/outputs/test_output_components.py::TestChatOutput::test_component_versions[1.0.19]": 1.6698510839778464, - 
"src/backend/tests/unit/components/outputs/test_output_components.py::TestChatOutput::test_component_versions[1.1.0]": 1.5964839159860276, - "src/backend/tests/unit/components/outputs/test_output_components.py::TestChatOutput::test_component_versions[1.1.1]": 1.8134009999921545, - "src/backend/tests/unit/components/outputs/test_output_components.py::TestChatOutput::test_latest_version": 61.025084708991926, - "src/backend/tests/unit/components/outputs/test_output_components.py::TestTextOutputComponent::test_all_versions_have_a_file_name_defined": 0.0007481240027118474, + "src/backend/tests/unit/components/outputs/test_output_components.py::TestChatOutput::test_component_versions[1.0.19]": 1.655275840999991, + "src/backend/tests/unit/components/outputs/test_output_components.py::TestChatOutput::test_component_versions[1.1.0]": 1.592353811999999, + "src/backend/tests/unit/components/outputs/test_output_components.py::TestChatOutput::test_component_versions[1.1.1]": 1.642500195000025, + "src/backend/tests/unit/components/outputs/test_output_components.py::TestChatOutput::test_latest_version": 1.5783257749999962, + "src/backend/tests/unit/components/outputs/test_output_components.py::TestTextOutputComponent::test_all_versions_have_a_file_name_defined": 0.0010861470000236295, "src/backend/tests/unit/components/outputs/test_output_components.py::TestTextOutputComponent::test_component_versions[1.0.17]": 0.27941045799525455, "src/backend/tests/unit/components/outputs/test_output_components.py::TestTextOutputComponent::test_component_versions[1.0.18]": 0.24612879107007757, - "src/backend/tests/unit/components/outputs/test_output_components.py::TestTextOutputComponent::test_component_versions[1.0.19]": 0.30652370798634365, - "src/backend/tests/unit/components/outputs/test_output_components.py::TestTextOutputComponent::test_component_versions[1.1.0]": 0.3760060830099974, - "src/backend/tests/unit/components/outputs/test_output_components.py::TestTextOutputComponent::test_component_versions[1.1.1]": 0.2740736670093611, - "src/backend/tests/unit/components/outputs/test_output_components.py::TestTextOutputComponent::test_latest_version": 0.008232540974859148, - "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_empty_dataframe": 0.0017680000164546072, - "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_invalid_operation": 0.002023250999627635, - "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_non_existent_column": 0.0014585420140065253, - "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_operations[Add Column-expected_columns0-expected_values0]": 0.0040456660208292305, - "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_operations[Drop Column-expected_columns1-None]": 0.0035802920174319297, - "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_operations[Filter-expected_columns2-expected_values2]": 0.0020335410081315786, - "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_operations[Head-expected_columns6-expected_values6]": 0.00148620898835361, - "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_operations[Rename Column-expected_columns4-None]": 0.0020349170081317425, - "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_operations[Replace Value-expected_columns8-expected_values8]": 0.002100540994433686, - 
"src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_operations[Select Columns-expected_columns5-None]": 0.0019658749806694686, - "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_operations[Sort-expected_columns3-expected_values3]": 0.0025413749972358346, - "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_operations[Tail-expected_columns7-expected_values7]": 0.0014617499837186188, - "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_all_versions_have_a_file_name_defined": 0.0007744569738861173, - "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_component_versions[1.0.19]": 0.30622270799358375, - "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_component_versions[1.1.0]": 0.26799249902251177, - "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_component_versions[1.1.1]": 0.3108463329845108, - "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_latest_version": 0.015081710007507354, - "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_split_text_as_dataframe": 0.0036359579826239496, - "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_split_text_basic": 0.002052416995866224, - "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_split_text_custom_separator": 0.001836584007833153, - "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_split_text_empty_input": 0.001333208056166768, - "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_split_text_multiple_inputs": 0.0015610840055160224, - "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_split_text_single_chunk": 0.0012377100065350533, - "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_split_text_with_metadata": 0.0022193740005604923, - "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_split_text_with_overlap": 0.0016871260304469615, - "src/backend/tests/unit/components/prompts/test_prompt_component.py::TestPromptComponent::test_all_versions_have_a_file_name_defined": 1.5207552909851074, + "src/backend/tests/unit/components/outputs/test_output_components.py::TestTextOutputComponent::test_component_versions[1.0.19]": 0.03518639599991502, + "src/backend/tests/unit/components/outputs/test_output_components.py::TestTextOutputComponent::test_component_versions[1.1.0]": 0.03161542900005543, + "src/backend/tests/unit/components/outputs/test_output_components.py::TestTextOutputComponent::test_component_versions[1.1.1]": 0.030538153999998485, + "src/backend/tests/unit/components/outputs/test_output_components.py::TestTextOutputComponent::test_latest_version": 0.003240054999992026, + "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_empty_dataframe": 0.002166881999926318, + "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_invalid_operation": 0.0022408699999800774, + 
"src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_non_existent_column": 0.0022595649999743728, + "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_operations[Add Column-expected_columns0-expected_values0]": 0.0035727640000686733, + "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_operations[Drop Column-expected_columns1-None]": 0.0028755249999790067, + "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_operations[Filter-expected_columns2-expected_values2]": 0.0028736209999919993, + "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_operations[Head-expected_columns6-expected_values6]": 0.002457174000028317, + "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_operations[Rename Column-expected_columns4-None]": 0.002593075999982375, + "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_operations[Replace Value-expected_columns8-expected_values8]": 0.002718189999995957, + "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_operations[Select Columns-expected_columns5-None]": 0.002730964000079439, + "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_operations[Sort-expected_columns3-expected_values3]": 0.0028808939999294125, + "src/backend/tests/unit/components/processing/test_dataframe_operations.py::test_operations[Tail-expected_columns7-expected_values7]": 0.002579421000007187, + "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_all_versions_have_a_file_name_defined": 0.0010402400000657508, + "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_component_versions[1.0.19]": 0.07575405999995155, + "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_component_versions[1.1.0]": 0.1015651479999633, + "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_component_versions[1.1.1]": 0.03383322099995212, + "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_latest_version": 0.00387014000000363, + "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_split_text_as_dataframe": 0.002899319999983163, + "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_split_text_basic": 0.002434912000012446, + "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_split_text_custom_separator": 0.002392221999969024, + "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_split_text_empty_input": 0.0019735609999997905, + "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_split_text_multiple_inputs": 0.0023335330000122667, + "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_split_text_single_chunk": 0.0020577179999463624, + "src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_split_text_with_metadata": 0.002013225999974111, + 
"src/backend/tests/unit/components/processing/test_split_text_component.py::TestSplitTextComponent::test_split_text_with_overlap": 0.0020263489999479134, + "src/backend/tests/unit/components/prompts/test_prompt_component.py::TestPromptComponent::test_all_versions_have_a_file_name_defined": 2.5852843129999883, "src/backend/tests/unit/components/prompts/test_prompt_component.py::TestPromptComponent::test_component_versions[1.0.17]": 15.071019583090674, "src/backend/tests/unit/components/prompts/test_prompt_component.py::TestPromptComponent::test_component_versions[1.0.18]": 5.277748624968808, - "src/backend/tests/unit/components/prompts/test_prompt_component.py::TestPromptComponent::test_component_versions[1.0.19]": 1.6095217510010116, - "src/backend/tests/unit/components/prompts/test_prompt_component.py::TestPromptComponent::test_component_versions[1.1.0]": 1.8817649160337169, - "src/backend/tests/unit/components/prompts/test_prompt_component.py::TestPromptComponent::test_component_versions[1.1.1]": 62.28817237401381, - "src/backend/tests/unit/components/prompts/test_prompt_component.py::TestPromptComponent::test_latest_version": 60.220392416988034, - "src/backend/tests/unit/components/prompts/test_prompt_component.py::TestPromptComponent::test_post_code_processing": 1.3076472490211017, - "src/backend/tests/unit/components/prompts/test_prompt_component.py::TestPromptComponent::test_prompt_component_latest": 1.3532734580221586, - "src/backend/tests/unit/components/prototypes/test_create_data_component.py::test_build_data": 0.0014012080209795386, - "src/backend/tests/unit/components/prototypes/test_create_data_component.py::test_get_data": 0.0009627089893911034, - "src/backend/tests/unit/components/prototypes/test_create_data_component.py::test_update_build_config": 0.001754167053150013, - "src/backend/tests/unit/components/prototypes/test_create_data_component.py::test_update_build_config_exceed_limit": 0.0010755839757621288, - "src/backend/tests/unit/components/prototypes/test_create_data_component.py::test_validate_text_key_invalid": 0.0009951670072041452, - "src/backend/tests/unit/components/prototypes/test_create_data_component.py::test_validate_text_key_valid": 0.0008797090267762542, - "src/backend/tests/unit/components/prototypes/test_update_data_component.py::test_build_data": 0.001470292016165331, - "src/backend/tests/unit/components/prototypes/test_update_data_component.py::test_get_data": 0.0011543339933268726, - "src/backend/tests/unit/components/prototypes/test_update_data_component.py::test_update_build_config": 0.0016987920098472387, - "src/backend/tests/unit/components/prototypes/test_update_data_component.py::test_update_build_config_exceed_limit": 0.0011546250025276095, - "src/backend/tests/unit/components/prototypes/test_update_data_component.py::test_validate_text_key_invalid": 0.0014267909864429384, - "src/backend/tests/unit/components/prototypes/test_update_data_component.py::test_validate_text_key_valid": 0.0012359580141492188, - "src/backend/tests/unit/components/tools/test_python_repl_tool.py::test_python_repl_tool_template": 0.011073624977143481, - "src/backend/tests/unit/components/tools/test_wikidata_api.py::test_fetch_content_empty_response": 0.0022772079973947257, - "src/backend/tests/unit/components/tools/test_wikidata_api.py::test_fetch_content_error_handling": 0.002572126017184928, - "src/backend/tests/unit/components/tools/test_wikidata_api.py::test_fetch_content_success": 0.002443125005811453, - 
"src/backend/tests/unit/components/tools/test_wikidata_api.py::test_fetch_content_text": 0.001968833996215835, - "src/backend/tests/unit/components/tools/test_wikidata_api.py::test_wikidata_initialization": 0.00225349998800084, - "src/backend/tests/unit/components/tools/test_wikidata_api.py::test_wikidata_template": 0.012159415986388922, - "src/backend/tests/unit/components/tools/test_yfinance_tool.py::test_yfinance_tool_template": 0.021843124966835603, - "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_all_versions_have_a_file_name_defined": 0.033616291009821, - "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_chroma_collection_to_data": 1.0298413329874165, - "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_chroma_collection_to_data_empty_collection": 0.15264508401742205, - "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_chroma_collection_to_data_without_metadata": 1.34676324998145, - "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_component_versions[1.0.19]": 0.4536823750240728, - "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_component_versions[1.1.0]": 0.3665697920077946, - "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_component_versions[1.1.1]": 0.4481858340150211, - "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_create_collection_with_data": 1.4714491659542546, - "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_create_db": 0.09763825000845827, - "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_duplicate_handling": 1.4940039570210502, - "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_latest_version": 0.8062127500306815, - "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_mmr_search": 3.7831454160332214, - "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_search_with_different_types": 3.259832292009378, - "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_search_with_score": 2.6811004169867374, - "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_similarity_search": 1.9340051670151297, - "src/backend/tests/unit/custom/component/test_component_instance_attributes.py::test_files_independence": 0.007871999987401068, - "src/backend/tests/unit/custom/component/test_component_instance_attributes.py::test_input_value_independence": 0.008282916009193286, - "src/backend/tests/unit/custom/component/test_component_instance_attributes.py::test_message_output_independence": 0.00598387504578568, - 
"src/backend/tests/unit/custom/component/test_component_instance_attributes.py::test_multiple_attributes_independence": 0.003124208014924079, - "src/backend/tests/unit/custom/component/test_component_instance_attributes.py::test_sender_name_independence": 0.002149458014173433, - "src/backend/tests/unit/custom/component/test_component_instance_attributes.py::test_status_independence": 0.016107917006593198, + "src/backend/tests/unit/components/prompts/test_prompt_component.py::TestPromptComponent::test_component_versions[1.0.19]": 1.6111417320000214, + "src/backend/tests/unit/components/prompts/test_prompt_component.py::TestPromptComponent::test_component_versions[1.1.0]": 1.645558005000055, + "src/backend/tests/unit/components/prompts/test_prompt_component.py::TestPromptComponent::test_component_versions[1.1.1]": 1.6451582309999822, + "src/backend/tests/unit/components/prompts/test_prompt_component.py::TestPromptComponent::test_latest_version": 1.5466248879999966, + "src/backend/tests/unit/components/prompts/test_prompt_component.py::TestPromptComponent::test_post_code_processing": 1.5484888360000468, + "src/backend/tests/unit/components/prompts/test_prompt_component.py::TestPromptComponent::test_prompt_component_latest": 1.559530742999982, + "src/backend/tests/unit/components/prototypes/test_create_data_component.py::test_build_data": 0.0018341809999924408, + "src/backend/tests/unit/components/prototypes/test_create_data_component.py::test_get_data": 0.0014434320000304979, + "src/backend/tests/unit/components/prototypes/test_create_data_component.py::test_update_build_config": 0.002152524999985417, + "src/backend/tests/unit/components/prototypes/test_create_data_component.py::test_update_build_config_exceed_limit": 0.0015598389999809115, + "src/backend/tests/unit/components/prototypes/test_create_data_component.py::test_validate_text_key_invalid": 0.0015290320000076463, + "src/backend/tests/unit/components/prototypes/test_create_data_component.py::test_validate_text_key_valid": 0.00141293499996209, + "src/backend/tests/unit/components/prototypes/test_update_data_component.py::test_build_data": 0.002288969000005636, + "src/backend/tests/unit/components/prototypes/test_update_data_component.py::test_get_data": 0.0018646380000859608, + "src/backend/tests/unit/components/prototypes/test_update_data_component.py::test_update_build_config": 0.0021155350000867656, + "src/backend/tests/unit/components/prototypes/test_update_data_component.py::test_update_build_config_exceed_limit": 0.002427898000064488, + "src/backend/tests/unit/components/prototypes/test_update_data_component.py::test_validate_text_key_invalid": 0.0018882520000147451, + "src/backend/tests/unit/components/prototypes/test_update_data_component.py::test_validate_text_key_valid": 0.0019240880000097604, + "src/backend/tests/unit/components/tools/test_python_repl_tool.py::test_python_repl_tool_template": 0.022143096000036167, + "src/backend/tests/unit/components/tools/test_wikidata_api.py::test_fetch_content_empty_response": 0.0033463309999888224, + "src/backend/tests/unit/components/tools/test_wikidata_api.py::test_fetch_content_error_handling": 0.0027890210000691695, + "src/backend/tests/unit/components/tools/test_wikidata_api.py::test_fetch_content_success": 0.0033035999999810883, + "src/backend/tests/unit/components/tools/test_wikidata_api.py::test_fetch_content_text": 0.002720753000005516, + "src/backend/tests/unit/components/tools/test_wikidata_api.py::test_wikidata_initialization": 0.0027075799998783623, + 
"src/backend/tests/unit/components/tools/test_wikidata_api.py::test_wikidata_template": 0.0164490569999316, + "src/backend/tests/unit/components/tools/test_yfinance_tool.py::test_yfinance_tool_template": 0.04200019999990445, + "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_all_versions_have_a_file_name_defined": 0.048225879000028726, + "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_chroma_collection_to_data": 0.3418821540000181, + "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_chroma_collection_to_data_empty_collection": 0.14159084199997096, + "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_chroma_collection_to_data_without_metadata": 0.4475617150000062, + "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_component_versions[1.0.19]": 0.21565879499996754, + "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_component_versions[1.1.0]": 0.25709917199998245, + "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_component_versions[1.1.1]": 0.253565402999925, + "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_create_collection_with_data": 1.1644287899999881, + "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_create_db": 0.1310368470000185, + "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_duplicate_handling": 0.8564732869999716, + "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_latest_version": 0.36979149200004713, + "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_mmr_search": 1.50333074699995, + "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_search_with_different_types": 1.6187152669999136, + "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_search_with_score": 2.045318189999989, + "src/backend/tests/unit/components/vectorstores/test_chroma_vector_store_component.py::TestChromaVectorStoreComponent::test_similarity_search": 1.6755597570000305, + "src/backend/tests/unit/custom/component/test_component_instance_attributes.py::test_files_independence": 0.002885682999931305, + "src/backend/tests/unit/custom/component/test_component_instance_attributes.py::test_input_value_independence": 0.003504337000038049, + "src/backend/tests/unit/custom/component/test_component_instance_attributes.py::test_message_output_independence": 0.004086943999993764, + "src/backend/tests/unit/custom/component/test_component_instance_attributes.py::test_multiple_attributes_independence": 0.0027001960000347935, + "src/backend/tests/unit/custom/component/test_component_instance_attributes.py::test_sender_name_independence": 0.002717849000021033, + 
"src/backend/tests/unit/custom/component/test_component_instance_attributes.py::test_status_independence": 0.00392599400004201, "src/backend/tests/unit/custom/component/test_component_to_tool.py::test_component_to_tool": 0.019733334018383175, "src/backend/tests/unit/custom/component/test_component_to_tool.py::test_component_to_tool_has_no_component_as_tool": 0.0017144169833045453, - "src/backend/tests/unit/custom/component/test_component_to_tool.py::test_component_to_toolkit": 0.007960874994751066, - "src/backend/tests/unit/custom/component/test_componet_set_functionality.py::test_set_with_message_text_input_list": 0.0006631250435020775, - "src/backend/tests/unit/custom/component/test_componet_set_functionality.py::test_set_with_mixed_list_input": 0.0012695819896180183, - "src/backend/tests/unit/custom/custom_component/test_component.py::test_set_component": 0.0021670830028597265, - "src/backend/tests/unit/custom/custom_component/test_component.py::test_set_invalid_output": 0.0028930410044267774, - "src/backend/tests/unit/custom/custom_component/test_component.py::test_set_required_inputs": 0.0014798749762121588, - "src/backend/tests/unit/custom/custom_component/test_component.py::test_set_required_inputs_various_components": 0.005512666975846514, - "src/backend/tests/unit/custom/custom_component/test_component.py::test_update_component_build_config_async": 0.028195831982884556, - "src/backend/tests/unit/custom/custom_component/test_component.py::test_update_component_build_config_sync": 0.043842626007972285, - "src/backend/tests/unit/custom/custom_component/test_component_events.py::test_component_build_results": 1.5518049579695798, - "src/backend/tests/unit/custom/custom_component/test_component_events.py::test_component_error_handling": 1.3093537499953527, - "src/backend/tests/unit/custom/custom_component/test_component_events.py::test_component_logging": 1.8488170409982558, - "src/backend/tests/unit/custom/custom_component/test_component_events.py::test_component_message_sending": 1.2429808760352898, - "src/backend/tests/unit/custom/custom_component/test_component_events.py::test_component_streaming_message": 1.3936392500181682, - "src/backend/tests/unit/custom/custom_component/test_component_events.py::test_component_tool_output": 1.3972079170052893, - "src/backend/tests/unit/custom/custom_component/test_update_outputs.py::TestComponentOutputs::test_run_and_validate_update_outputs_custom_update": 0.0007521669613197446, - "src/backend/tests/unit/custom/custom_component/test_update_outputs.py::TestComponentOutputs::test_run_and_validate_update_outputs_invalid_output": 0.0008412079769186676, - "src/backend/tests/unit/custom/custom_component/test_update_outputs.py::TestComponentOutputs::test_run_and_validate_update_outputs_output_validation": 0.0007758319843560457, - "src/backend/tests/unit/custom/custom_component/test_update_outputs.py::TestComponentOutputs::test_run_and_validate_update_outputs_tool_mode": 0.0017231249948963523, - "src/backend/tests/unit/custom/custom_component/test_update_outputs.py::TestComponentOutputs::test_run_and_validate_update_outputs_with_existing_tool_output": 0.0008380009967368096, - "src/backend/tests/unit/custom/custom_component/test_update_outputs.py::TestComponentOutputs::test_run_and_validate_update_outputs_with_multiple_outputs": 0.0007860839832574129, - "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_accessing_non_registered_callback": 0.0005387899873312563, - 
"src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_accessing_non_registered_event_callback_with_recommended_fix": 0.0004892919969279319, - "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_accessing_registered_event_callback": 0.0004759589792229235, - "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_event_id_uniqueness_with_await": 0.0011245420027989894, - "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_handling_large_number_of_events": 0.001501416991231963, - "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_performance_impact_frequent_registrations": 0.0010817910078912973, - "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_queue_receives_correct_event_data_format": 0.0010312919912394136, - "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_register_event_with_empty_name": 0.000543709029443562, - "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_register_event_with_invalid_name_fixed": 0.0007266659813467413, - "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_register_event_with_valid_name_and_callback_with_mock_callback": 0.0006197509937919676, - "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_register_event_with_valid_name_and_no_callback": 0.00047366696526296437, - "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_register_event_without_event_type_argument_fixed": 0.0008825410332065076, - "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_sending_event_with_complex_data": 0.0010801659955177456, - "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_sending_event_with_none_data": 0.0005283750069793314, + "src/backend/tests/unit/custom/component/test_component_to_tool.py::test_component_to_toolkit": 0.004543995000005907, + "src/backend/tests/unit/custom/component/test_componet_set_functionality.py::test_set_with_message_text_input_list": 0.001054616999965674, + "src/backend/tests/unit/custom/component/test_componet_set_functionality.py::test_set_with_mixed_list_input": 0.0012476080000283218, + "src/backend/tests/unit/custom/custom_component/test_component.py::test_set_component": 0.0026277620000314528, + "src/backend/tests/unit/custom/custom_component/test_component.py::test_set_invalid_output": 0.003529122999907486, + "src/backend/tests/unit/custom/custom_component/test_component.py::test_set_required_inputs": 0.001764981999997417, + "src/backend/tests/unit/custom/custom_component/test_component.py::test_set_required_inputs_various_components": 0.007025344999988192, + "src/backend/tests/unit/custom/custom_component/test_component.py::test_update_component_build_config_async": 0.014721209000015278, + "src/backend/tests/unit/custom/custom_component/test_component.py::test_update_component_build_config_sync": 0.037478135999947426, + "src/backend/tests/unit/custom/custom_component/test_component_events.py::test_component_build_results": 1.5888679939999975, + "src/backend/tests/unit/custom/custom_component/test_component_events.py::test_component_error_handling": 1.6710162119999836, + "src/backend/tests/unit/custom/custom_component/test_component_events.py::test_component_logging": 1.629418363999946, + "src/backend/tests/unit/custom/custom_component/test_component_events.py::test_component_message_sending": 1.6353781330000174, + 
"src/backend/tests/unit/custom/custom_component/test_component_events.py::test_component_streaming_message": 2.9224710769999547, + "src/backend/tests/unit/custom/custom_component/test_component_events.py::test_component_tool_output": 1.575011520999908, + "src/backend/tests/unit/custom/custom_component/test_update_outputs.py::TestComponentOutputs::test_run_and_validate_update_outputs_custom_update": 0.0011392359999717883, + "src/backend/tests/unit/custom/custom_component/test_update_outputs.py::TestComponentOutputs::test_run_and_validate_update_outputs_invalid_output": 0.0012978820000171254, + "src/backend/tests/unit/custom/custom_component/test_update_outputs.py::TestComponentOutputs::test_run_and_validate_update_outputs_output_validation": 0.001293183000029785, + "src/backend/tests/unit/custom/custom_component/test_update_outputs.py::TestComponentOutputs::test_run_and_validate_update_outputs_tool_mode": 0.001823960999956853, + "src/backend/tests/unit/custom/custom_component/test_update_outputs.py::TestComponentOutputs::test_run_and_validate_update_outputs_with_existing_tool_output": 0.0012720630000444544, + "src/backend/tests/unit/custom/custom_component/test_update_outputs.py::TestComponentOutputs::test_run_and_validate_update_outputs_with_multiple_outputs": 0.001172436999979709, + "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_accessing_non_registered_callback": 0.0009416259999852628, + "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_accessing_non_registered_event_callback_with_recommended_fix": 0.0009398430000260305, + "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_accessing_registered_event_callback": 0.000922351000042454, + "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_event_id_uniqueness_with_await": 0.0013536039999735294, + "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_handling_large_number_of_events": 0.0022268540000141, + "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_performance_impact_frequent_registrations": 0.0018142440000588067, + "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_queue_receives_correct_event_data_format": 0.001307458000042061, + "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_register_event_with_empty_name": 0.00101392199997008, + "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_register_event_with_invalid_name_fixed": 0.0011417900000196823, + "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_register_event_with_valid_name_and_callback_with_mock_callback": 0.0012222399999473055, + "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_register_event_with_valid_name_and_no_callback": 0.0009008390000531108, + "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_register_event_without_event_type_argument_fixed": 0.0009971009999389935, + "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_sending_event_with_complex_data": 0.0014525599999615224, + "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_sending_event_with_none_data": 0.0009277300000576361, "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_sending_event_with_valid_type_and_data_asyncio_plugin": 0.007096707937307656, - 
"src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_thread_safety_accessing_events_dictionary": 0.0010646250157151371, - "src/backend/tests/unit/exceptions/test_api.py::test_api_exception": 0.0029699999722652137, - "src/backend/tests/unit/exceptions/test_api.py::test_api_exception_no_flow": 0.0007042080396786332, - "src/backend/tests/unit/graph/edge/test_edge_base.py::test_edge_raises_error_on_invalid_target_handle": 0.016517957963515073, - "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_create_model_and_assign_values_fails": 0.0027644170040730387, - "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_create_model_with_fields_from_kwargs": 0.0008491669723298401, - "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_create_model_with_invalid_callable": 0.0007403339841403067, - "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_create_model_with_valid_return_type_annotations": 0.00372404299560003, - "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_create_with_multiple_components": 0.0054351250000763685, - "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_create_with_pydantic_field": 0.0037736660160589963, - "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_default_model_name_to_state": 0.0015526659844908863, - "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_graph_functional_start_state_update": 61.07310412498191, - "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_handle_empty_kwargs_gracefully": 0.000962125021032989, - "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_raise_typeerror_for_invalid_field_type_in_tuple": 0.0006300000241026282, + "src/backend/tests/unit/events/test_event_manager.py::TestEventManager::test_thread_safety_accessing_events_dictionary": 0.0013232879999804936, + "src/backend/tests/unit/exceptions/test_api.py::test_api_exception": 0.0046495230000687116, + "src/backend/tests/unit/exceptions/test_api.py::test_api_exception_no_flow": 0.000948799000013878, + "src/backend/tests/unit/graph/edge/test_edge_base.py::test_edge_raises_error_on_invalid_target_handle": 0.029882319000023472, + "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_create_model_and_assign_values_fails": 0.004273301000068841, + "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_create_model_with_fields_from_kwargs": 0.0014533909999840944, + "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_create_model_with_invalid_callable": 0.0010254110000005312, + "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_create_model_with_valid_return_type_annotations": 0.005163761999938288, + "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_create_with_multiple_components": 0.005385344999979225, + "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_create_with_pydantic_field": 0.004351724999992257, + "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_default_model_name_to_state": 0.0013961540000195782, + 
"src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_graph_functional_start_state_update": 1.6868459210000424, + "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_handle_empty_kwargs_gracefully": 0.0013602069999478772, + "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_raise_typeerror_for_invalid_field_type_in_tuple": 0.0010355919999938124, "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_raise_valueerror_for_invalid_field_type_in_tuple": 0.00342700001783669, - "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_raise_valueerror_for_unsupported_value_types": 0.0007755430124234408, - "src/backend/tests/unit/graph/graph/test_base.py::test_graph": 0.012193833012133837, - "src/backend/tests/unit/graph/graph/test_base.py::test_graph_functional": 0.08576708301552571, - "src/backend/tests/unit/graph/graph/test_base.py::test_graph_functional_async_start": 0.14174241700675339, - "src/backend/tests/unit/graph/graph/test_base.py::test_graph_functional_start": 0.011415499990107492, - "src/backend/tests/unit/graph/graph/test_base.py::test_graph_functional_start_end": 0.01616929197916761, - "src/backend/tests/unit/graph/graph/test_base.py::test_graph_not_prepared": 0.10679987497860566, + "src/backend/tests/unit/graph/graph/state/test_state_model.py::TestCreateStateModel::test_raise_valueerror_for_unsupported_value_types": 0.0010802850000004582, + "src/backend/tests/unit/graph/graph/test_base.py::test_graph": 0.018625401999997848, + "src/backend/tests/unit/graph/graph/test_base.py::test_graph_functional": 0.01637409100004561, + "src/backend/tests/unit/graph/graph/test_base.py::test_graph_functional_async_start": 0.017259272999979203, + "src/backend/tests/unit/graph/graph/test_base.py::test_graph_functional_start": 0.01762604100002818, + "src/backend/tests/unit/graph/graph/test_base.py::test_graph_functional_start_end": 0.027254366999954982, + "src/backend/tests/unit/graph/graph/test_base.py::test_graph_not_prepared": 0.03329151600001978, "src/backend/tests/unit/graph/graph/test_base.py::test_graph_set_with_invalid_component": 0.0009155830484814942, - "src/backend/tests/unit/graph/graph/test_base.py::test_graph_set_with_valid_component": 0.00022187602007761598, - "src/backend/tests/unit/graph/graph/test_base.py::test_graph_with_edge": 0.3945287909882609, - "src/backend/tests/unit/graph/graph/test_callback_graph.py::test_callback_graph": 0.00019604101544246078, - "src/backend/tests/unit/graph/graph/test_cycles.py::test_conditional_router_max_iterations": 0.04364991598413326, - "src/backend/tests/unit/graph/graph/test_cycles.py::test_cycle_in_graph": 0.0002916670055128634, - "src/backend/tests/unit/graph/graph/test_cycles.py::test_cycle_in_graph_max_iterations": 0.019244750030338764, - "src/backend/tests/unit/graph/graph/test_cycles.py::test_that_outputs_cache_is_set_to_false_in_cycle": 0.011639750009635463, - "src/backend/tests/unit/graph/graph/test_cycles.py::test_updated_graph_with_max_iterations": 3.0966855410370044, - "src/backend/tests/unit/graph/graph/test_cycles.py::test_updated_graph_with_prompts": 2.9236285419901833, - "src/backend/tests/unit/graph/graph/test_graph_state_model.py::test_graph_functional_start_graph_state_update": 0.025387083005625755, - "src/backend/tests/unit/graph/graph/test_graph_state_model.py::test_graph_state_model": 0.3463474999880418, - 
"src/backend/tests/unit/graph/graph/test_graph_state_model.py::test_graph_state_model_json_schema": 0.00018595799338072538, - "src/backend/tests/unit/graph/graph/test_graph_state_model.py::test_graph_state_model_serialization": 0.02110020798863843, - "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_add_to_vertices_being_run": 0.0019493750296533108, - "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_are_all_predecessors_fulfilled": 0.0026042079844046384, - "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_are_all_predecessors_fulfilled__wrong": 0.0005982079892419279, - "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_build_run_map": 0.0006956249999348074, - "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_from_dict": 0.000620041013462469, - "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_from_dict_without_run_map__bad_case": 0.0005465419963002205, - "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_from_dict_without_run_predecessors__bad_case": 0.0008345409878529608, - "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_from_dict_without_vertices_being_run__bad_case": 0.0005128329794388264, - "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_from_dict_without_vertices_to_run__bad_case": 0.0006007499760016799, - "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_is_vertex_runnable": 0.0006076250283513218, - "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_is_vertex_runnable__wrong_is_active": 0.0011433760519139469, - "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_is_vertex_runnable__wrong_run_predecessors": 0.0018341650138609111, - "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_is_vertex_runnable__wrong_vertices_to_run": 0.0005568329943343997, - "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_pickle": 0.0005303749931044877, - "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_remove_from_predecessors": 0.007398457993986085, - "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_remove_vertex_from_runnables": 0.0020007909915875643, - "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_to_dict": 0.001082917005987838, - "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_update_run_state": 0.0006445829931180924, - "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_update_vertex_run_state": 0.0005340850038919598, - "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_update_vertex_run_state__bad_case": 0.0004844590148422867, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_detects_cycles_in_simple_graph": 0.0005540419660974294, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_disconnected_components": 0.0005470419710036367, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_duplicate_edges": 0.00046458400902338326, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_identifies_multiple_cycles": 0.0009737910295370966, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_large_graphs_efficiency": 0.001372208003886044, - 
"src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_mixed_data_types_in_edges": 0.0004697499971371144, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_multiple_edges_between_same_nodes": 0.0004383749619591981, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_no_cycles_present": 0.0018800419929903, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_nodes_with_no_incoming_edges": 0.0004705010214820504, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_nodes_with_no_outgoing_edges": 0.0004541249945759773, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_self_loops": 0.0004940830112900585, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_single_node_no_edges": 0.0007123760005924851, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_detects_cycle_in_simple_graph": 0.0016330409853253514, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_disconnected_components": 0.0004982080135960132, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_duplicate_edges": 0.0010904150258284062, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_empty_edges_list": 0.0010827919759321958, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_identifies_first_cycle": 0.0004772509855683893, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_large_graph_efficiency": 0.0005462090484797955, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_multiple_cycles": 0.0005846249696332961, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_multiple_edges_between_same_nodes": 0.0009279990044888109, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_nodes_with_no_outgoing_edges": 0.0005306250241119415, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_returns_none_when_no_cycle": 0.0005364569660741836, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_self_loop_cycle": 0.0004684999876189977, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_single_node_no_edges": 0.0005639169830828905, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_correctly_identify_and_return_vertices_in_single_cycle": 0.00047112497850321233, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_detect_cycles_simple_graph": 0.0009745419956743717, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_duplicate_edges_fixed_fixed": 0.0004595829814206809, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_empty_edges": 0.0004926249966956675, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_large_graphs_efficiently": 0.00048499999684281647, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_no_outgoing_edges": 0.000635957985650748, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_self_loops": 0.0011790000135079026, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_single_cycle": 0.0010164999985136092, - 
"src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_two_inputs_in_cycle[0]": 0.00048420901293866336, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_two_inputs_in_cycle[1]": 0.00047970705782063305, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_two_inputs_in_cycle[2]": 0.0004949579888489097, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_two_inputs_in_cycle[3]": 0.0005443330155685544, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_two_inputs_in_cycle[4]": 0.00047712595551274717, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_no_cycles_empty_list": 0.0004750840016640723, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_no_modification_of_input_edges_list": 0.0005071249906904995, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_non_string_vertex_ids": 0.0005092910141684115, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_process_disconnected_components": 0.0006390420312527567, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_return_vertices_involved_in_multiple_cycles": 0.00048141696606762707, - "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_single_vertex_no_edges": 0.0009626669925637543, - "src/backend/tests/unit/graph/graph/test_utils.py::test_chat_inputs_at_start": 0.000529957003891468, - "src/backend/tests/unit/graph/graph/test_utils.py::test_get_sorted_vertices_exact_sequence": 0.0005033329944126308, - "src/backend/tests/unit/graph/graph/test_utils.py::test_get_sorted_vertices_simple": 0.000468001002445817, - "src/backend/tests/unit/graph/graph/test_utils.py::test_get_sorted_vertices_with_complex_cycle": 0.0010871670092456043, - "src/backend/tests/unit/graph/graph/test_utils.py::test_get_sorted_vertices_with_cycle": 0.00047041798825375736, - "src/backend/tests/unit/graph/graph/test_utils.py::test_get_sorted_vertices_with_stop": 0.000707583996700123, - "src/backend/tests/unit/graph/graph/test_utils.py::test_get_sorted_vertices_with_stop_at_chroma": 0.0005837079952470958, - "src/backend/tests/unit/graph/graph/test_utils.py::test_get_successors_a": 0.0005811250302940607, - "src/backend/tests/unit/graph/graph/test_utils.py::test_get_successors_z": 0.0004957909986842424, - "src/backend/tests/unit/graph/graph/test_utils.py::test_has_cycle": 0.0012301669921725988, - "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_a": 0.0005350420251488686, - "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_g": 0.0006856680265627801, - "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_h": 0.0004927500267513096, - "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_invalid_vertex": 0.0006574160070158541, - "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_m": 0.0004936670011375099, - "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_n_is_start": 0.0005057500093244016, - "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_t": 0.0010982909880112857, - "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_x": 0.0005366250115912408, - "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_z": 0.000909958005649969, + 
"src/backend/tests/unit/graph/graph/test_base.py::test_graph_set_with_valid_component": 0.0002511379999532437, + "src/backend/tests/unit/graph/graph/test_base.py::test_graph_with_edge": 0.01792719899998474, + "src/backend/tests/unit/graph/graph/test_callback_graph.py::test_callback_graph": 0.00023630099997262732, + "src/backend/tests/unit/graph/graph/test_cycles.py::test_conditional_router_max_iterations": 0.02631293599995388, + "src/backend/tests/unit/graph/graph/test_cycles.py::test_cycle_in_graph": 0.00023769299997411508, + "src/backend/tests/unit/graph/graph/test_cycles.py::test_cycle_in_graph_max_iterations": 0.023842438999963633, + "src/backend/tests/unit/graph/graph/test_cycles.py::test_that_outputs_cache_is_set_to_false_in_cycle": 0.021882905999973445, + "src/backend/tests/unit/graph/graph/test_cycles.py::test_updated_graph_with_max_iterations": 2.034349202000044, + "src/backend/tests/unit/graph/graph/test_cycles.py::test_updated_graph_with_prompts": 2.4665780930000096, + "src/backend/tests/unit/graph/graph/test_graph_state_model.py::test_graph_functional_start_graph_state_update": 0.03301417200003698, + "src/backend/tests/unit/graph/graph/test_graph_state_model.py::test_graph_state_model": 0.05054343599999811, + "src/backend/tests/unit/graph/graph/test_graph_state_model.py::test_graph_state_model_json_schema": 0.000282035999987329, + "src/backend/tests/unit/graph/graph/test_graph_state_model.py::test_graph_state_model_serialization": 0.025676873000008982, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_add_to_vertices_being_run": 0.0008680589999698896, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_are_all_predecessors_fulfilled": 0.0008610560000192891, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_are_all_predecessors_fulfilled__wrong": 0.0008685599999580518, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_build_run_map": 0.0008600940000746959, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_from_dict": 0.000894709000021976, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_from_dict_without_run_map__bad_case": 0.0008705649999569687, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_from_dict_without_run_predecessors__bad_case": 0.0008638809999865771, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_from_dict_without_vertices_being_run__bad_case": 0.0008926840000071934, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_from_dict_without_vertices_to_run__bad_case": 0.0008866950000765428, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_is_vertex_runnable": 0.0008620779999546357, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_is_vertex_runnable__wrong_is_active": 0.0008327429999326341, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_is_vertex_runnable__wrong_run_predecessors": 0.0008611259999042886, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_is_vertex_runnable__wrong_vertices_to_run": 0.000863219999928333, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_pickle": 0.0009143259999859765, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_remove_from_predecessors": 0.0008705740000891637, + 
"src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_remove_vertex_from_runnables": 0.0008521509999468435, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_to_dict": 0.0009576770000307988, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_update_run_state": 0.0008895999999367632, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_update_vertex_run_state": 0.0008473899999899004, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_update_vertex_run_state__bad_case": 0.0008368709999899693, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_detects_cycles_in_simple_graph": 0.0008272620000866482, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_disconnected_components": 0.0008158420000086153, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_duplicate_edges": 0.0009043369999517381, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_identifies_multiple_cycles": 0.0031078069999921354, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_large_graphs_efficiency": 0.0015284310000538426, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_mixed_data_types_in_edges": 0.0009074420000274586, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_multiple_edges_between_same_nodes": 0.000820610999994642, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_no_cycles_present": 0.0008391759999994974, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_nodes_with_no_incoming_edges": 0.000906812000039281, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_nodes_with_no_outgoing_edges": 0.0008143879999238379, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_self_loops": 0.0008191690000103335, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindAllCycleEdges::test_single_node_no_edges": 0.0008276240000100188, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_detects_cycle_in_simple_graph": 0.000836892000052103, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_disconnected_components": 0.0008142899999938891, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_duplicate_edges": 0.0008210110000277382, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_empty_edges_list": 0.0008222839999803, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_identifies_first_cycle": 0.000817024999946625, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_large_graph_efficiency": 0.0008360200000083751, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_multiple_cycles": 0.0008166529999584782, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_multiple_edges_between_same_nodes": 0.0008260209999662038, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_nodes_with_no_outgoing_edges": 0.0008159020000562123, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_returns_none_when_no_cycle": 0.0008063930000048458, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_self_loop_cycle": 
0.0008168939999677605, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleEdge::test_single_node_no_edges": 0.000813708000009683, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_correctly_identify_and_return_vertices_in_single_cycle": 0.0009461949999831631, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_detect_cycles_simple_graph": 0.0010576630000400655, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_duplicate_edges_fixed_fixed": 0.0009433700000158751, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_empty_edges": 0.000901000999988355, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_large_graphs_efficiently": 0.0009622349999744984, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_no_outgoing_edges": 0.000947396999947614, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_self_loops": 0.0009968099999468905, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_single_cycle": 0.0009307170000738552, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_two_inputs_in_cycle[0]": 0.0009935629999517914, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_two_inputs_in_cycle[1]": 0.0009890450000398232, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_two_inputs_in_cycle[2]": 0.0010020589999726326, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_two_inputs_in_cycle[3]": 0.0009751090000236218, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_handle_two_inputs_in_cycle[4]": 0.0009605020001117737, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_no_cycles_empty_list": 0.0009358760000282018, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_no_modification_of_input_edges_list": 0.0009769430000119428, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_non_string_vertex_ids": 0.0009575040000413537, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_process_disconnected_components": 0.000960620999933326, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_return_vertices_involved_in_multiple_cycles": 0.0009349730000280942, + "src/backend/tests/unit/graph/graph/test_utils.py::TestFindCycleVertices::test_single_vertex_no_edges": 0.0009537499999510146, + "src/backend/tests/unit/graph/graph/test_utils.py::test_chat_inputs_at_start": 0.0010307730000249649, + "src/backend/tests/unit/graph/graph/test_utils.py::test_get_sorted_vertices_exact_sequence": 0.000979576000020188, + "src/backend/tests/unit/graph/graph/test_utils.py::test_get_sorted_vertices_simple": 0.0009024530000374398, + "src/backend/tests/unit/graph/graph/test_utils.py::test_get_sorted_vertices_with_complex_cycle": 0.0009732059999691955, + "src/backend/tests/unit/graph/graph/test_utils.py::test_get_sorted_vertices_with_cycle": 0.001019622000058007, + "src/backend/tests/unit/graph/graph/test_utils.py::test_get_sorted_vertices_with_stop": 0.0009334810000041216, + "src/backend/tests/unit/graph/graph/test_utils.py::test_get_sorted_vertices_with_stop_at_chroma": 0.0009793880000188437, + 
"src/backend/tests/unit/graph/graph/test_utils.py::test_get_successors_a": 0.0008926830000746122, + "src/backend/tests/unit/graph/graph/test_utils.py::test_get_successors_z": 0.0011252000000467888, + "src/backend/tests/unit/graph/graph/test_utils.py::test_has_cycle": 0.0008250989999964986, + "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_a": 0.0008616980000510921, + "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_g": 0.0008630900000525799, + "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_h": 0.0008673290000160705, + "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_invalid_vertex": 0.0009999350000384766, + "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_m": 0.0008640130000117097, + "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_n_is_start": 0.0009213779999868166, + "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_t": 0.000861477000000832, + "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_x": 0.0008621680000260312, + "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_z": 0.0008623079999665606, "src/backend/tests/unit/graph/test_graph.py::test_build_edges": 0.001086625037714839, "src/backend/tests/unit/graph/test_graph.py::test_build_nodes": 0.0012113330303691328, "src/backend/tests/unit/graph/test_graph.py::test_build_params": 0.00745550001738593, "src/backend/tests/unit/graph/test_graph.py::test_circular_dependencies": 0.0011518750106915832, - "src/backend/tests/unit/graph/test_graph.py::test_find_last_node": 0.0011753749859053642, + "src/backend/tests/unit/graph/test_graph.py::test_find_last_node": 0.001344077999988258, "src/backend/tests/unit/graph/test_graph.py::test_get_node": 3.6276886249543168, "src/backend/tests/unit/graph/test_graph.py::test_get_node_neighbors_basic": 0.0015942919999361038, "src/backend/tests/unit/graph/test_graph.py::test_get_root_vertex": 0.00336533400695771, "src/backend/tests/unit/graph/test_graph.py::test_get_vertices_with_target": 0.0015001240535639226, "src/backend/tests/unit/graph/test_graph.py::test_graph_structure": 3.660518125980161, - "src/backend/tests/unit/graph/test_graph.py::test_invalid_node_types": 0.0011791250144597143, + "src/backend/tests/unit/graph/test_graph.py::test_invalid_node_types": 0.0013685040000837034, "src/backend/tests/unit/graph/test_graph.py::test_matched_type": 0.0011828330461867154, "src/backend/tests/unit/graph/test_graph.py::test_pickle_graph": 0.025576499931048602, - "src/backend/tests/unit/graph/test_graph.py::test_process_flow": 0.0014215410046745092, - "src/backend/tests/unit/graph/test_graph.py::test_process_flow_one_group": 0.002223041985416785, - "src/backend/tests/unit/graph/test_graph.py::test_process_flow_vector_store_grouped": 0.0030770000303164124, - "src/backend/tests/unit/graph/test_graph.py::test_serialize_graph": 0.7644289169693366, - "src/backend/tests/unit/graph/test_graph.py::test_set_new_target_handle": 0.0005397920031100512, - "src/backend/tests/unit/graph/test_graph.py::test_ungroup_node": 0.0016352509846910834, - "src/backend/tests/unit/graph/test_graph.py::test_update_source_handle": 0.0005107919860165566, - "src/backend/tests/unit/graph/test_graph.py::test_update_target_handle_proxy": 0.0006460419972427189, - "src/backend/tests/unit/graph/test_graph.py::test_update_template": 0.0006044579786248505, + "src/backend/tests/unit/graph/test_graph.py::test_process_flow": 
0.0020753909999484677, + "src/backend/tests/unit/graph/test_graph.py::test_process_flow_one_group": 0.002661123000052612, + "src/backend/tests/unit/graph/test_graph.py::test_process_flow_vector_store_grouped": 0.004049423000026309, + "src/backend/tests/unit/graph/test_graph.py::test_serialize_graph": 0.2903884259999927, + "src/backend/tests/unit/graph/test_graph.py::test_set_new_target_handle": 0.0008556849999763472, + "src/backend/tests/unit/graph/test_graph.py::test_ungroup_node": 0.0019187479999800416, + "src/backend/tests/unit/graph/test_graph.py::test_update_source_handle": 0.0008682289999342174, + "src/backend/tests/unit/graph/test_graph.py::test_update_target_handle_proxy": 0.0008886179999763044, + "src/backend/tests/unit/graph/test_graph.py::test_update_template": 0.0010134999999991123, "src/backend/tests/unit/graph/test_graph.py::test_validate_edges": 0.0010510420543141663, - "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_correctly_accesses_descriptions_recommended_fix": 0.001257458992768079, - "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_create_model_from_valid_schema": 0.0013969589781481773, - "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_handle_empty_schema": 0.0008776659960858524, - "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_handle_large_schemas_efficiently": 0.0016080420173238963, - "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_handles_multiple_fields_fixed_with_instance_check": 0.0012837090180255473, - "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_manages_unknown_field_types": 0.0009701249655336142, - "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_nested_list_and_dict_types_handling": 0.0008985409804154187, - "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_no_duplicate_field_names_fixed_fixed": 0.04202587599866092, - "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_process_schema_missing_optional_keys_updated": 0.002521998976590112, - "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_raises_error_for_invalid_input_different_exception_with_specific_exception": 0.000609208014793694, - "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_returns_valid_model_class": 0.0009015409741550684, - "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_schema_fields_with_none_default": 0.0012640840141102672, - "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_supports_single_and_multiple_type_annotations": 0.0014757919998373836, - "src/backend/tests/unit/helpers/test_data.py::test_data_to_text_list[{name} is {age} years old-data0-expected0]": 0.001163041015388444, - "src/backend/tests/unit/helpers/test_data.py::test_data_to_text_list[{name} is {age} years old-data1-expected1]": 0.0006376249657478184, - "src/backend/tests/unit/helpers/test_data.py::test_data_to_text_list__data_contains_nested_data_key": 0.0004725419858004898, - "src/backend/tests/unit/helpers/test_data.py::test_data_to_text_list__data_with_data_attribute_empty": 0.0005149179778527468, - 
"src/backend/tests/unit/helpers/test_data.py::test_data_to_text_list__template_empty": 0.000672876019962132, - "src/backend/tests/unit/helpers/test_data.py::test_data_to_text_list__template_without_placeholder": 0.0009091249958146363, - "src/backend/tests/unit/helpers/test_data.py::test_data_to_text_list__template_without_placeholder_and_data_attribute_empty": 0.0009753329795785248, - "src/backend/tests/unit/helpers/test_data.py::test_data_to_text_list__template_wrong_placeholder": 0.0005736670282203704, - "src/backend/tests/unit/initial_setup/starter_projects/test_memory_chatbot.py::test_memory_chatbot": 60.05181770797935, - "src/backend/tests/unit/initial_setup/starter_projects/test_memory_chatbot.py::test_memory_chatbot_dump_components_and_edges": 0.016574167006183416, - "src/backend/tests/unit/initial_setup/starter_projects/test_memory_chatbot.py::test_memory_chatbot_dump_structure": 0.04619175096740946, - "src/backend/tests/unit/initial_setup/starter_projects/test_vector_store_rag.py::test_vector_store_rag": 0.16158741700928658, - "src/backend/tests/unit/initial_setup/starter_projects/test_vector_store_rag.py::test_vector_store_rag_add": 0.0700397499895189, - "src/backend/tests/unit/initial_setup/starter_projects/test_vector_store_rag.py::test_vector_store_rag_dump": 0.0376161259773653, - "src/backend/tests/unit/initial_setup/starter_projects/test_vector_store_rag.py::test_vector_store_rag_dump_components_and_edges": 0.038878416031366214, - "src/backend/tests/unit/inputs/test_inputs.py::test_bool_input_invalid": 0.0006863320304546505, - "src/backend/tests/unit/inputs/test_inputs.py::test_bool_input_valid": 0.0007916660106275231, - "src/backend/tests/unit/inputs/test_inputs.py::test_code_input_valid": 0.0005315420276019722, - "src/backend/tests/unit/inputs/test_inputs.py::test_data_input_valid": 0.00046829204075038433, - "src/backend/tests/unit/inputs/test_inputs.py::test_dict_input_invalid": 0.0005272900161799043, - "src/backend/tests/unit/inputs/test_inputs.py::test_dict_input_valid": 0.0005709579854737967, - "src/backend/tests/unit/inputs/test_inputs.py::test_dropdown_input_invalid": 0.0004479999770410359, - "src/backend/tests/unit/inputs/test_inputs.py::test_dropdown_input_valid": 0.00047050000284798443, - "src/backend/tests/unit/inputs/test_inputs.py::test_file_input_valid": 0.0004681670106947422, - "src/backend/tests/unit/inputs/test_inputs.py::test_float_input_invalid": 0.0007941249932628125, - "src/backend/tests/unit/inputs/test_inputs.py::test_float_input_valid": 0.0008741669880691916, - "src/backend/tests/unit/inputs/test_inputs.py::test_handle_input_invalid": 0.0004852490092162043, - "src/backend/tests/unit/inputs/test_inputs.py::test_handle_input_valid": 0.00048450002213940024, - "src/backend/tests/unit/inputs/test_inputs.py::test_instantiate_input_comprehensive": 0.0005724180082324892, - "src/backend/tests/unit/inputs/test_inputs.py::test_instantiate_input_invalid": 0.0007331660017371178, - "src/backend/tests/unit/inputs/test_inputs.py::test_instantiate_input_valid": 0.0004818339948542416, - "src/backend/tests/unit/inputs/test_inputs.py::test_int_input_invalid": 0.0007125830452423543, - "src/backend/tests/unit/inputs/test_inputs.py::test_int_input_valid": 0.0005811669689137489, - "src/backend/tests/unit/inputs/test_inputs.py::test_message_text_input_invalid": 0.0005387490091379732, - "src/backend/tests/unit/inputs/test_inputs.py::test_message_text_input_valid": 0.0006168750114738941, - "src/backend/tests/unit/inputs/test_inputs.py::test_multiline_input_invalid": 
0.0004874579899478704, - "src/backend/tests/unit/inputs/test_inputs.py::test_multiline_input_valid": 0.0004753770481329411, - "src/backend/tests/unit/inputs/test_inputs.py::test_multiline_secret_input_invalid": 0.0004496260080486536, - "src/backend/tests/unit/inputs/test_inputs.py::test_multiline_secret_input_valid": 0.0004580819804687053, - "src/backend/tests/unit/inputs/test_inputs.py::test_multiselect_input_invalid": 0.0005143749876879156, - "src/backend/tests/unit/inputs/test_inputs.py::test_multiselect_input_valid": 0.0008066239824984223, - "src/backend/tests/unit/inputs/test_inputs.py::test_nested_dict_input_invalid": 0.000946874002693221, - "src/backend/tests/unit/inputs/test_inputs.py::test_nested_dict_input_valid": 0.0008341660141013563, - "src/backend/tests/unit/inputs/test_inputs.py::test_prompt_input_valid": 0.00047087398706935346, - "src/backend/tests/unit/inputs/test_inputs.py::test_secret_str_input_invalid": 0.0006172499852254987, - "src/backend/tests/unit/inputs/test_inputs.py::test_secret_str_input_valid": 0.0009428340126760304, - "src/backend/tests/unit/inputs/test_inputs.py::test_str_input_invalid": 0.0009670420258771628, - "src/backend/tests/unit/inputs/test_inputs.py::test_str_input_valid": 0.00046120802289806306, - "src/backend/tests/unit/inputs/test_inputs.py::test_table_input_invalid": 0.000547957984963432, - "src/backend/tests/unit/inputs/test_inputs.py::test_table_input_valid": 0.0007970000151544809, - "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_complex_nested_structures_handling": 0.0029271669627632946, - "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_default_values_assignment": 0.0006303739792201668, - "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_default_values_for_non_required_fields": 0.000651500973617658, - "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_empty_list_of_inputs": 0.0006389170011971146, - "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_field_types_conversion": 0.0006796680099796504, - "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_fields_creation_with_correct_types_and_attributes": 0.0006343739805743098, + "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_correctly_accesses_descriptions_recommended_fix": 0.0027463630000283956, + "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_create_model_from_valid_schema": 0.0020805810000297242, + "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_handle_empty_schema": 0.0012141650000216941, + "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_handle_large_schemas_efficiently": 0.0018388499999559826, + "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_handles_multiple_fields_fixed_with_instance_check": 0.0018551499999830412, + "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_manages_unknown_field_types": 0.0011173539999731474, + "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_nested_list_and_dict_types_handling": 0.0016606560000127502, + "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_no_duplicate_field_names_fixed_fixed": 0.002188331999946058, + 
"src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_process_schema_missing_optional_keys_updated": 0.0019049619999691458, + "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_raises_error_for_invalid_input_different_exception_with_specific_exception": 0.0009327810000172576, + "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_returns_valid_model_class": 0.0015351339999938318, + "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_schema_fields_with_none_default": 0.0018262769999637385, + "src/backend/tests/unit/helpers/test_base_model_from_schema.py::TestBuildModelFromSchema::test_supports_single_and_multiple_type_annotations": 0.0016701239999861173, + "src/backend/tests/unit/helpers/test_data.py::test_data_to_text_list[{name} is {age} years old-data0-expected0]": 0.001055598000050395, + "src/backend/tests/unit/helpers/test_data.py::test_data_to_text_list[{name} is {age} years old-data1-expected1]": 0.001017137000076218, + "src/backend/tests/unit/helpers/test_data.py::test_data_to_text_list__data_contains_nested_data_key": 0.0009026439999502145, + "src/backend/tests/unit/helpers/test_data.py::test_data_to_text_list__data_with_data_attribute_empty": 0.0008929950000151621, + "src/backend/tests/unit/helpers/test_data.py::test_data_to_text_list__template_empty": 0.0008997969999882116, + "src/backend/tests/unit/helpers/test_data.py::test_data_to_text_list__template_without_placeholder": 0.0008947590000047967, + "src/backend/tests/unit/helpers/test_data.py::test_data_to_text_list__template_without_placeholder_and_data_attribute_empty": 0.0008950489999506317, + "src/backend/tests/unit/helpers/test_data.py::test_data_to_text_list__template_wrong_placeholder": 0.0009238540000069406, + "src/backend/tests/unit/initial_setup/starter_projects/test_memory_chatbot.py::test_memory_chatbot": 1.7861974219999865, + "src/backend/tests/unit/initial_setup/starter_projects/test_memory_chatbot.py::test_memory_chatbot_dump_components_and_edges": 0.0316383220000489, + "src/backend/tests/unit/initial_setup/starter_projects/test_memory_chatbot.py::test_memory_chatbot_dump_structure": 0.035721077999937734, + "src/backend/tests/unit/initial_setup/starter_projects/test_vector_store_rag.py::test_vector_store_rag": 0.17904318500001182, + "src/backend/tests/unit/initial_setup/starter_projects/test_vector_store_rag.py::test_vector_store_rag_add": 0.1396331499999519, + "src/backend/tests/unit/initial_setup/starter_projects/test_vector_store_rag.py::test_vector_store_rag_dump": 0.07668020699998124, + "src/backend/tests/unit/initial_setup/starter_projects/test_vector_store_rag.py::test_vector_store_rag_dump_components_and_edges": 0.07671972100001767, + "src/backend/tests/unit/inputs/test_inputs.py::test_bool_input_invalid": 0.0008525610000447159, + "src/backend/tests/unit/inputs/test_inputs.py::test_bool_input_valid": 0.0008364299999925606, + "src/backend/tests/unit/inputs/test_inputs.py::test_code_input_valid": 0.0008362099999317252, + "src/backend/tests/unit/inputs/test_inputs.py::test_data_input_valid": 0.0009104879999881632, + "src/backend/tests/unit/inputs/test_inputs.py::test_dict_input_invalid": 0.0008368620000283045, + "src/backend/tests/unit/inputs/test_inputs.py::test_dict_input_valid": 0.0008403789999533728, + "src/backend/tests/unit/inputs/test_inputs.py::test_dropdown_input_invalid": 0.000834586999928888, + 
"src/backend/tests/unit/inputs/test_inputs.py::test_dropdown_input_valid": 0.0008297479998873314, + "src/backend/tests/unit/inputs/test_inputs.py::test_file_input_valid": 0.000927489999980935, + "src/backend/tests/unit/inputs/test_inputs.py::test_float_input_invalid": 0.0008692209999594525, + "src/backend/tests/unit/inputs/test_inputs.py::test_float_input_valid": 0.0008314699999232289, + "src/backend/tests/unit/inputs/test_inputs.py::test_handle_input_invalid": 0.0008579699999700097, + "src/backend/tests/unit/inputs/test_inputs.py::test_handle_input_valid": 0.0008447850000266044, + "src/backend/tests/unit/inputs/test_inputs.py::test_instantiate_input_comprehensive": 0.0009998659999723714, + "src/backend/tests/unit/inputs/test_inputs.py::test_instantiate_input_invalid": 0.0009443599999485741, + "src/backend/tests/unit/inputs/test_inputs.py::test_instantiate_input_valid": 0.0008393039999532448, + "src/backend/tests/unit/inputs/test_inputs.py::test_int_input_invalid": 0.0008471300000110205, + "src/backend/tests/unit/inputs/test_inputs.py::test_int_input_valid": 0.0008394860000180415, + "src/backend/tests/unit/inputs/test_inputs.py::test_message_text_input_invalid": 0.0009005799999499686, + "src/backend/tests/unit/inputs/test_inputs.py::test_message_text_input_valid": 0.0009377399999834779, + "src/backend/tests/unit/inputs/test_inputs.py::test_multiline_input_invalid": 0.0008583219999991343, + "src/backend/tests/unit/inputs/test_inputs.py::test_multiline_input_valid": 0.0008448259999909169, + "src/backend/tests/unit/inputs/test_inputs.py::test_multiline_secret_input_invalid": 0.0008583299999145311, + "src/backend/tests/unit/inputs/test_inputs.py::test_multiline_secret_input_valid": 0.0008485429999609551, + "src/backend/tests/unit/inputs/test_inputs.py::test_multiselect_input_invalid": 0.0011448340000015378, + "src/backend/tests/unit/inputs/test_inputs.py::test_multiselect_input_valid": 0.00083382400003984, + "src/backend/tests/unit/inputs/test_inputs.py::test_nested_dict_input_invalid": 0.0008558360000279208, + "src/backend/tests/unit/inputs/test_inputs.py::test_nested_dict_input_valid": 0.0008453180000174143, + "src/backend/tests/unit/inputs/test_inputs.py::test_prompt_input_valid": 0.0008336250000411383, + "src/backend/tests/unit/inputs/test_inputs.py::test_secret_str_input_invalid": 0.000871013999983461, + "src/backend/tests/unit/inputs/test_inputs.py::test_secret_str_input_valid": 0.0008498650000774433, + "src/backend/tests/unit/inputs/test_inputs.py::test_str_input_invalid": 0.000903927000024396, + "src/backend/tests/unit/inputs/test_inputs.py::test_str_input_valid": 0.0009133640000413834, + "src/backend/tests/unit/inputs/test_inputs.py::test_table_input_invalid": 0.000923773999943478, + "src/backend/tests/unit/inputs/test_inputs.py::test_table_input_valid": 0.0012289930000406457, + "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_complex_nested_structures_handling": 0.0015369779999900857, + "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_default_values_assignment": 0.0012841239999943355, + "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_default_values_for_non_required_fields": 0.0014380520000258912, + "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_empty_list_of_inputs": 0.001124116000084996, + "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_field_types_conversion": 0.0012702280000098654, + 
"src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_fields_creation_with_correct_types_and_attributes": 0.0012855070000341584, "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_invalid_field_types_handling": 0.0005195839912630618, - "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_is_list_attribute_processing": 0.0006963760242797434, - "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_is_list_handling": 0.0007649999752175063, - "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_missing_attributes_handling": 0.0012399579863995314, - "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_missing_optional_attributes": 0.0006575419974979013, - "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_mixed_required_optional_fields_processing": 0.0006677920173387975, - "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_multiple_input_types": 0.0008424169500358403, - "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_non_standard_field_types_handling": 0.0008621250162832439, - "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_none_default_value_handling": 0.0009242079977411777, - "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_options_attribute_processing": 0.0007198750099632889, - "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_options_handling": 0.0009986250079236925, - "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_passing_input_type_directly": 0.0006498339935205877, - "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_schema_model_creation": 0.0006096660217735916, - "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_single_input_type_conversion": 0.001271915971301496, - "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_single_input_type_replica": 0.0010511670261621475, - "src/backend/tests/unit/io/test_io_schema.py::TestCreateInputSchema::test_special_characters_in_names_handling": 0.0007371680112555623, - "src/backend/tests/unit/io/test_io_schema.py::test_create_input_schema": 0.0014953330100979656, - "src/backend/tests/unit/io/test_table_schema.py::TestColumn::test_create_column_with_valid_formatter": 0.0006357899983413517, - "src/backend/tests/unit/io/test_table_schema.py::TestColumn::test_create_column_without_display_name": 0.0004896240134257823, - "src/backend/tests/unit/io/test_table_schema.py::TestColumn::test_create_with_type_instead_of_formatter": 0.00047175103100016713, - "src/backend/tests/unit/io/test_table_schema.py::TestColumn::test_default_sortable_filterable": 0.000452458014478907, - "src/backend/tests/unit/io/test_table_schema.py::TestColumn::test_description_and_default": 0.0005717090098187327, - "src/backend/tests/unit/io/test_table_schema.py::TestColumn::test_formatter_explicitly_set_to_enum": 0.00045691701234318316, - "src/backend/tests/unit/io/test_table_schema.py::TestColumn::test_formatter_none_when_not_provided": 0.0005209579830989242, - "src/backend/tests/unit/io/test_table_schema.py::TestColumn::test_formatter_set_based_on_value": 0.0005174569960217923, - "src/backend/tests/unit/io/test_table_schema.py::TestColumn::test_invalid_formatter_raises_value_error": 0.000995416019577533, - "src/backend/tests/unit/schema/test_content_block.py::TestContentBlock::test_allow_markdown_override": 
[diff of .test_durations continues: the recorded pytest durations for the unit tests under src/backend/tests/unit — io/, schema/, services/variable/, and the various test_*.py modules — are replaced with regenerated timing values; only the machine-generated duration numbers change in this span.]
"src/backend/tests/unit/test_schema.py::TestPostProcessType::test_union_type": 0.0010281680151820183, - "src/backend/tests/unit/test_setup_superuser.py::test_teardown_superuser_default_superuser": 0.002218792971689254, - "src/backend/tests/unit/test_setup_superuser.py::test_teardown_superuser_no_default_superuser": 0.0215872090193443, - "src/backend/tests/unit/test_telemetry.py::test_gauge": 0.0014027089637238532, - "src/backend/tests/unit/test_telemetry.py::test_gauge_with_counter_method": 0.0008330410055350512, - "src/backend/tests/unit/test_telemetry.py::test_gauge_with_historgram_method": 0.0007057090115267783, - "src/backend/tests/unit/test_telemetry.py::test_gauge_with_up_down_counter_method": 0.000687708001350984, - "src/backend/tests/unit/test_telemetry.py::test_increment_counter": 0.000704000995028764, - "src/backend/tests/unit/test_telemetry.py::test_increment_counter_empty_label": 0.0006923750333953649, - "src/backend/tests/unit/test_telemetry.py::test_increment_counter_missing_mandatory_label": 0.0008031670295167714, - "src/backend/tests/unit/test_telemetry.py::test_increment_counter_unregisted_metric": 0.0006146680098026991, - "src/backend/tests/unit/test_telemetry.py::test_init": 0.0006332920165732503, - "src/backend/tests/unit/test_telemetry.py::test_missing_labels": 0.0008734989969525486, - "src/backend/tests/unit/test_telemetry.py::test_multithreaded_singleton": 0.005841416044859216, - "src/backend/tests/unit/test_telemetry.py::test_multithreaded_singleton_race_condition": 0.3744097919843625, - "src/backend/tests/unit/test_telemetry.py::test_opentelementry_singleton": 0.0005202919710427523, - "src/backend/tests/unit/test_template.py::test_build_template_from_function": 0.0033069999772123992, - "src/backend/tests/unit/test_template.py::test_get_base_classes": 0.000552374025573954, - "src/backend/tests/unit/test_template.py::test_get_default_factory": 0.0005877090152353048, - "src/backend/tests/unit/test_user.py::test_add_user": 1.6184632920194417, - "src/backend/tests/unit/test_user.py::test_data_consistency_after_delete": 2.164943417010363, - "src/backend/tests/unit/test_user.py::test_data_consistency_after_update": 2.415470957988873, - "src/backend/tests/unit/test_user.py::test_deactivated_user_cannot_access": 2.23501787497662, - "src/backend/tests/unit/test_user.py::test_deactivated_user_cannot_login": 1.8409284579975065, - "src/backend/tests/unit/test_user.py::test_delete_user": 1.548597459011944, - "src/backend/tests/unit/test_user.py::test_delete_user_wrong_id": 1.9468178749957588, - "src/backend/tests/unit/test_user.py::test_inactive_user": 1.3642030000046361, - "src/backend/tests/unit/test_user.py::test_normal_user_cant_delete_user": 1.8243713749980088, - "src/backend/tests/unit/test_user.py::test_normal_user_cant_read_all_users": 2.038196041015908, - "src/backend/tests/unit/test_user.py::test_patch_reset_password": 2.620299875008641, - "src/backend/tests/unit/test_user.py::test_patch_user": 1.6100572090072092, - "src/backend/tests/unit/test_user.py::test_patch_user_wrong_id": 2.1514435009739827, - "src/backend/tests/unit/test_user.py::test_read_all_users": 1.6546565419703256, - "src/backend/tests/unit/test_user.py::test_user_waiting_for_approval": 3.1668287909706123, - "src/backend/tests/unit/test_validate_code.py::test_create_class": 0.001028417027555406, - "src/backend/tests/unit/test_validate_code.py::test_create_class_with_external_variables_and_functions": 0.0007450829725712538, - 
"src/backend/tests/unit/test_validate_code.py::test_create_class_with_multiple_external_classes": 0.0009715420019347221, - "src/backend/tests/unit/test_validate_code.py::test_create_function": 0.0007041249773465097, - "src/backend/tests/unit/test_validate_code.py::test_execute_function_missing_function": 0.0006371659983415157, - "src/backend/tests/unit/test_validate_code.py::test_execute_function_missing_module": 0.000946499960264191, - "src/backend/tests/unit/test_validate_code.py::test_execute_function_missing_schema": 0.0009797500097192824, - "src/backend/tests/unit/test_validate_code.py::test_execute_function_success": 0.000587834045290947, - "src/backend/tests/unit/test_validate_code.py::test_validate_code": 0.0010108750138897449, - "src/backend/tests/unit/test_version.py::test_compute_main": 0.00047774999984540045, - "src/backend/tests/unit/test_version.py::test_version": 0.0006016670085955411, - "src/backend/tests/unit/test_webhook.py::test_webhook_endpoint": 4.034270833974006, - "src/backend/tests/unit/test_webhook.py::test_webhook_flow_on_run_endpoint": 2.5163553740421776, - "src/backend/tests/unit/test_webhook.py::test_webhook_with_random_payload": 3.502802498987876, - "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[protocol::password@host-protocol::password@host]": 0.0006442089797928929, - "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[protocol:user:pa:ss:word@host-protocol:user:pa:ss:word@host]": 0.0006004580354783684, - "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[protocol:user:pa@ss@word@host-protocol:user:pa%40ss%40word@host]": 0.0006087489891797304, - "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[protocol:user:pass@word@host-protocol:user:pass%40word@host]": 0.0005368749843910336, - "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[protocol:user:password@-protocol:user:password@]": 0.0005510420014616102, - "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[protocol:user:password@host-protocol:user:password@host]": 0.0011215830163564533, - "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[protocol:user@host-protocol:user@host]": 0.0006797079695388675, - "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[user:password@host-user:password@host]": 0.0009067920036613941, - "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[-]": 0.0005174170073587447, - "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[/home/user/\\ndocu\\nments/file.txt-/home/user/\\\\ndocu\\\\nments/file.txt]": 0.0007240840059239417, - "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[/home/user/docu\\n\\nments/file.txt-/home/user/docu\\\\n\\\\nments/file.txt]": 0.0006135840085335076, - "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[/home/user/docu\\nments/file.txt-/home/user/docu\\\\nments/file.txt]": 0.0012994589924346656, - "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[/home/user/documents/\\n-/home/user/documents/\\\\n]": 0.0005962919967714697, - 
"src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[/home/user/documents/file.txt-/home/user/documents/file.txt]": 0.0009478749998379499, - "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[/home/user/my-\\ndocs/special_file!.pdf-/home/user/my-\\\\ndocs/special_file!.pdf]": 0.0005841670208610594, - "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[C:/Users\\\\Documents/file.txt-C:/Users\\\\Documents/file.txt]": 0.0005609169893432409, - "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[C:\\\\Users\\\\Documents\\\\-C:\\\\Users\\\\Documents\\\\]": 0.0006535420252475888, - "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[C:\\\\Users\\\\Documents\\\\file.txt-C:\\\\Users\\\\Documents\\\\file.txt]": 0.0005597510025836527, - "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[C:\\\\Users\\\\\\nDocuments\\\\file.txt-C:\\\\Users\\\\\\\\nDocuments\\\\file.txt]": 0.0005942500138189644, - "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[\\\\\\\\server\\\\share\\\\file.txt-\\\\\\\\server\\\\share\\\\file.txt]": 0.0006109579699113965, - "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[\\n/home/user/documents/-\\\\n/home/user/documents/]": 0.0009012499649543315, - "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[\\n\\n\\n-\\\\n\\\\n\\\\n]": 0.0005593330133706331, - "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path_type": 0.0005608339561149478, + "src/backend/tests/unit/test_process.py::test_multiple_tweaks": 0.0008588819999886255, + "src/backend/tests/unit/test_process.py::test_no_tweaks": 0.0010121780001099978, + "src/backend/tests/unit/test_process.py::test_single_tweak": 0.0008738390000644358, + "src/backend/tests/unit/test_process.py::test_tweak_no_node_id": 0.0008586519999198572, + "src/backend/tests/unit/test_process.py::test_tweak_not_in_template": 0.0008640709999099272, + "src/backend/tests/unit/test_schema.py::TestInput::test_field_type_str": 0.0009959769998886259, + "src/backend/tests/unit/test_schema.py::TestInput::test_field_type_type": 0.0009105469998758053, + "src/backend/tests/unit/test_schema.py::TestInput::test_input_to_dict": 0.0008871659998703763, + "src/backend/tests/unit/test_schema.py::TestInput::test_invalid_field_type": 0.0008820659999173586, + "src/backend/tests/unit/test_schema.py::TestInput::test_post_process_type_function": 0.0014080159999139141, + "src/backend/tests/unit/test_schema.py::TestInput::test_serialize_field_type": 0.0008604559999412231, + "src/backend/tests/unit/test_schema.py::TestInput::test_validate_type_class": 0.0008599340000046141, + "src/backend/tests/unit/test_schema.py::TestInput::test_validate_type_string": 0.000855965999903674, + "src/backend/tests/unit/test_schema.py::TestOutput::test_output_add_types": 0.0008651629999576471, + "src/backend/tests/unit/test_schema.py::TestOutput::test_output_default": 0.0008606449999888355, + "src/backend/tests/unit/test_schema.py::TestOutput::test_output_set_selected": 0.0008666549999816198, + "src/backend/tests/unit/test_schema.py::TestOutput::test_output_to_dict": 0.0008784679999962464, + "src/backend/tests/unit/test_schema.py::TestOutput::test_output_validate_display_name": 0.0008741409998265226, + 
"src/backend/tests/unit/test_schema.py::TestOutput::test_output_validate_model": 0.0008757639998293598, + "src/backend/tests/unit/test_schema.py::TestPostProcessType::test_custom_type": 0.0008476009999185408, + "src/backend/tests/unit/test_schema.py::TestPostProcessType::test_int_type": 0.0008418799999390103, + "src/backend/tests/unit/test_schema.py::TestPostProcessType::test_list_custom_type": 0.0008568189999778042, + "src/backend/tests/unit/test_schema.py::TestPostProcessType::test_list_int_type": 0.0008382430000892782, + "src/backend/tests/unit/test_schema.py::TestPostProcessType::test_union_custom_type": 0.0010996820000173102, + "src/backend/tests/unit/test_schema.py::TestPostProcessType::test_union_type": 0.0008406979999335817, + "src/backend/tests/unit/test_setup_superuser.py::test_teardown_superuser_default_superuser": 0.0025497259999838207, + "src/backend/tests/unit/test_setup_superuser.py::test_teardown_superuser_no_default_superuser": 0.0028463899999451314, + "src/backend/tests/unit/test_telemetry.py::test_gauge": 0.0008838490000471211, + "src/backend/tests/unit/test_telemetry.py::test_gauge_with_counter_method": 0.0009968890000209285, + "src/backend/tests/unit/test_telemetry.py::test_gauge_with_historgram_method": 0.000975038999968092, + "src/backend/tests/unit/test_telemetry.py::test_gauge_with_up_down_counter_method": 0.0009996950000186189, + "src/backend/tests/unit/test_telemetry.py::test_increment_counter": 0.000851948999979868, + "src/backend/tests/unit/test_telemetry.py::test_increment_counter_empty_label": 0.0009558819999710977, + "src/backend/tests/unit/test_telemetry.py::test_increment_counter_missing_mandatory_label": 0.0009855170000037106, + "src/backend/tests/unit/test_telemetry.py::test_increment_counter_unregisted_metric": 0.0009596909999345371, + "src/backend/tests/unit/test_telemetry.py::test_init": 0.0009029950000467579, + "src/backend/tests/unit/test_telemetry.py::test_missing_labels": 0.000923061999969832, + "src/backend/tests/unit/test_telemetry.py::test_multithreaded_singleton": 0.005228230999932748, + "src/backend/tests/unit/test_telemetry.py::test_multithreaded_singleton_race_condition": 0.019170620000068084, + "src/backend/tests/unit/test_telemetry.py::test_opentelementry_singleton": 0.0008702640001274631, + "src/backend/tests/unit/test_template.py::test_build_template_from_function": 0.0036947810000356185, + "src/backend/tests/unit/test_template.py::test_get_base_classes": 0.0009134839999660471, + "src/backend/tests/unit/test_template.py::test_get_default_factory": 0.0009969090000367942, + "src/backend/tests/unit/test_user.py::test_add_user": 3.437542764, + "src/backend/tests/unit/test_user.py::test_data_consistency_after_delete": 3.700971078000066, + "src/backend/tests/unit/test_user.py::test_data_consistency_after_update": 3.4211229790000743, + "src/backend/tests/unit/test_user.py::test_deactivated_user_cannot_access": 5.593178784999964, + "src/backend/tests/unit/test_user.py::test_deactivated_user_cannot_login": 3.4531719659999, + "src/backend/tests/unit/test_user.py::test_delete_user": 3.5128745660000504, + "src/backend/tests/unit/test_user.py::test_delete_user_wrong_id": 3.583569442000112, + "src/backend/tests/unit/test_user.py::test_inactive_user": 3.404797406000057, + "src/backend/tests/unit/test_user.py::test_normal_user_cant_delete_user": 5.996112846999949, + "src/backend/tests/unit/test_user.py::test_normal_user_cant_read_all_users": 5.934024692999969, + "src/backend/tests/unit/test_user.py::test_patch_reset_password": 3.5108865399998876, + 
"src/backend/tests/unit/test_user.py::test_patch_user": 3.425097598999969, + "src/backend/tests/unit/test_user.py::test_patch_user_wrong_id": 3.373163040999998, + "src/backend/tests/unit/test_user.py::test_read_all_users": 3.5040830659999074, + "src/backend/tests/unit/test_user.py::test_user_waiting_for_approval": 3.3791692410001133, + "src/backend/tests/unit/test_validate_code.py::test_create_class": 0.0014745919999086254, + "src/backend/tests/unit/test_validate_code.py::test_create_class_with_external_variables_and_functions": 0.0013525430000527194, + "src/backend/tests/unit/test_validate_code.py::test_create_class_with_multiple_external_classes": 0.00145554599987463, + "src/backend/tests/unit/test_validate_code.py::test_create_function": 0.0014833970000154295, + "src/backend/tests/unit/test_validate_code.py::test_execute_function_missing_function": 0.0010604180000655106, + "src/backend/tests/unit/test_validate_code.py::test_execute_function_missing_module": 0.0012644299999919895, + "src/backend/tests/unit/test_validate_code.py::test_execute_function_missing_schema": 0.0013691329999119262, + "src/backend/tests/unit/test_validate_code.py::test_execute_function_success": 0.0011405179998291715, + "src/backend/tests/unit/test_validate_code.py::test_validate_code": 0.001434336000102121, + "src/backend/tests/unit/test_version.py::test_compute_main": 0.0009202270000514545, + "src/backend/tests/unit/test_version.py::test_version": 0.0009197349999112703, + "src/backend/tests/unit/test_webhook.py::test_webhook_endpoint": 3.445863824000071, + "src/backend/tests/unit/test_webhook.py::test_webhook_flow_on_run_endpoint": 3.340415212000039, + "src/backend/tests/unit/test_webhook.py::test_webhook_with_random_payload": 3.4189955110000483, + "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[protocol::password@host-protocol::password@host]": 0.0008917630000269128, + "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[protocol:user:pa:ss:word@host-protocol:user:pa:ss:word@host]": 0.0009013710000544961, + "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[protocol:user:pa@ss@word@host-protocol:user:pa%40ss%40word@host]": 0.0008942879999267461, + "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[protocol:user:pass@word@host-protocol:user:pass%40word@host]": 0.0009052080000628848, + "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[protocol:user:password@-protocol:user:password@]": 0.0008630489999177371, + "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[protocol:user:password@host-protocol:user:password@host]": 0.0012955060000194862, + "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[protocol:user@host-protocol:user@host]": 0.000915680000048269, + "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[user:password@host-user:password@host]": 0.0008877669999947102, + "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[-]": 0.0008802819999118583, + "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[/home/user/\\ndocu\\nments/file.txt-/home/user/\\\\ndocu\\\\nments/file.txt]": 0.0008758119998901748, + 
"src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[/home/user/docu\\n\\nments/file.txt-/home/user/docu\\\\n\\\\nments/file.txt]": 0.0008862030001637322, + "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[/home/user/docu\\nments/file.txt-/home/user/docu\\\\nments/file.txt]": 0.0008635200000526311, + "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[/home/user/documents/\\n-/home/user/documents/\\\\n]": 0.0008751120000169976, + "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[/home/user/documents/file.txt-/home/user/documents/file.txt]": 0.0008729379999294906, + "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[/home/user/my-\\ndocs/special_file!.pdf-/home/user/my-\\\\ndocs/special_file!.pdf]": 0.0008776380000199424, + "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[C:/Users\\\\Documents/file.txt-C:/Users\\\\Documents/file.txt]": 0.0008676879999711673, + "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[C:\\\\Users\\\\Documents\\\\-C:\\\\Users\\\\Documents\\\\]": 0.0008551230000648502, + "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[C:\\\\Users\\\\Documents\\\\file.txt-C:\\\\Users\\\\Documents\\\\file.txt]": 0.0008755339999879652, + "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[C:\\\\Users\\\\\\nDocuments\\\\file.txt-C:\\\\Users\\\\\\\\nDocuments\\\\file.txt]": 0.0008734399999639209, + "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[\\\\\\\\server\\\\share\\\\file.txt-\\\\\\\\server\\\\share\\\\file.txt]": 0.000873648999913712, + "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[\\n/home/user/documents/-\\\\n/home/user/documents/]": 0.0008571989999381913, + "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path[\\n\\n\\n-\\\\n\\\\n\\\\n]": 0.0008830680000073698, + "src/backend/tests/unit/utils/test_format_directory_path.py::test_format_directory_path_type": 0.0008074060000353711, "src/backend/tests/unit/utils/test_image_utils.py::TestImageUtils::test_convert_image_to_base64_directory": 0.002373834024183452, "src/backend/tests/unit/utils/test_image_utils.py::TestImageUtils::test_convert_image_to_base64_empty_path": 0.0015134999412111938, "src/backend/tests/unit/utils/test_image_utils.py::TestImageUtils::test_convert_image_to_base64_nonexistent_file": 0.0014794580056332052, @@ -1061,74 +1061,74 @@ "src/backend/tests/unit/utils/test_image_utils.py::TestImageUtils::test_create_data_url_success": 0.0014539569965563715, "src/backend/tests/unit/utils/test_image_utils.py::TestImageUtils::test_create_data_url_unrecognized_extension": 0.0038709990330971777, "src/backend/tests/unit/utils/test_image_utils.py::TestImageUtils::test_create_data_url_with_custom_mime": 0.0027264999807812274, - "src/backend/tests/unit/utils/test_image_utils.py::test_convert_image_to_base64_directory": 0.0019761240109801292, - "src/backend/tests/unit/utils/test_image_utils.py::test_convert_image_to_base64_empty_path": 0.002518124005291611, - "src/backend/tests/unit/utils/test_image_utils.py::test_convert_image_to_base64_nonexistent_file": 0.0006649170245509595, - "src/backend/tests/unit/utils/test_image_utils.py::test_convert_image_to_base64_success": 
0.0013485839881468564, - "src/backend/tests/unit/utils/test_image_utils.py::test_create_data_url_invalid_file": 0.0006480820011347532, - "src/backend/tests/unit/utils/test_image_utils.py::test_create_data_url_success": 0.0012157510209362954, - "src/backend/tests/unit/utils/test_image_utils.py::test_create_data_url_unrecognized_extension": 0.0009680840303190053, - "src/backend/tests/unit/utils/test_image_utils.py::test_create_data_url_with_custom_mime": 0.0015612509741913527, - "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[-]": 0.0006802089919801801, - "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[/home/user/\\ndocu\\nments/file.txt-/home/user/\\\\ndocu\\\\nments/file.txt]": 0.0005367079575080425, - "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[/home/user/docu\\n\\nments/file.txt-/home/user/docu\\\\n\\\\nments/file.txt]": 0.000562499015359208, - "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[/home/user/docu\\nments/file.txt-/home/user/docu\\\\nments/file.txt]": 0.0006366659654304385, - "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[/home/user/documents/\\n-/home/user/documents/\\\\n]": 0.0005623750039376318, - "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[/home/user/documents/file.txt-/home/user/documents/file.txt]": 0.0005294170114211738, - "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[/home/user/my-\\ndocs/special_file!.pdf-/home/user/my-\\\\ndocs/special_file!.pdf]": 0.0020117909880355, - "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[C:\\\\Users\\\\\\nDocuments\\\\file.txt-C:\\\\Users\\\\\\\\nDocuments\\\\file.txt]": 0.0006747930019628257, - "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[\\n/home/user/documents/-\\\\n/home/user/documents/]": 0.0010190829925704747, - "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[\\n\\n\\n-\\\\n\\\\n\\\\n]": 0.0008732079586479813, - "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path_type": 0.0005145840113982558, - "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_negative_max_length": 0.0005885009886696935, - "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[-5-]": 0.0006528749945573509, - "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[12345-3-12345]": 0.0005964999727439135, - "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[3.141592653589793-4-3.141592653589793]": 0.0006173750152811408, - "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[None-5-None]": 0.000619249971350655, - "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[True-2-True]": 0.0006434170063585043, - "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[\\u3053\\u3093\\u306b\\u3061\\u306f-3-\\u3053\\u3093\\u306b...]": 0.0006980420148465782, - "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[a-1-a]": 0.0007920829812064767, - 
"src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-10-aaaaaaaaaa...]": 0.0005845840205438435, - "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[exact-5-exact]": 0.0005717489984817803, - "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[long string-7-long st...]": 0.0006011250079609454, - "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[short string-20-short string]": 0.0006571249687112868, - "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_none_max_length": 0.0006093749834690243, - "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_zero_max_length": 0.0005185829941183329, - "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data0-10-expected0]": 0.0005471259937621653, - "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data1-5-expected1]": 0.001377915992634371, - "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data2-7-expected2]": 0.0006472509994637221, - "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data3-8-expected3]": 0.0005914589855819941, - "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data4-10-expected4]": 0.0005243340274319053, - "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data5-10-expected5]": 0.0006965000065974891, - "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data6-10-expected6]": 0.0006244159885682166, - "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data7-5-expected7]": 0.000745333032682538, - "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data8-3-expected8]": 0.0006331659678835422, - "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data9-10-expected9]": 0.002042792009888217, - "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings_default_max_length": 0.0009171249694190919, - 
"src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings_in_place_modification": 0.0006229579739738256, - "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings_invalid_input": 0.0006900829903315753, - "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings_negative_max_length": 0.0005880840471945703, - "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings_no_modification": 0.0008267069933936, - "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings_small_max_length": 0.0016065840027295053, - "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings_type_preservation": 0.0007182509871199727, - "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings_zero_max_length": 0.0004997499927412719, - "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[ invalid -False]": 0.0005580839933827519, - "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[-False]": 0.0006215419853106141, - "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[None-False]": 0.0006731670291628689, - "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[invalid://:@/test-False]": 0.013715749984839931, - "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[invalid://database-False]": 0.01648833297076635, - "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[mysql+mysqldb://scott:tiger@localhost/foo-True]": 0.001174208999145776, - "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[mysql+pymysql://scott:tiger@localhost/foo-True]": 0.0006669999856967479, - "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[mysql://user:pass@localhost/dbname-True]": 0.026843416009796783, - "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[not_a_url-False]": 0.0006165420345496386, - "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[oracle+cx_oracle://scott:tiger@tnsalias-True]": 0.0006687089626211673, - "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[oracle+oracledb://scott:tiger@127.0.0.1:1521/?service_name=freepdb1-True]": 0.0006226249970495701, - "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[oracle://scott:tiger@127.0.0.1:1521/?service_name=freepdb1-True]": 0.01509362502838485, - "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[postgresql+pg8000://dbuser:kx%40jj5%2Fg@pghost10/appdb-True]": 0.0011051659821532667, - "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[postgresql+psycopg2://scott:tiger@localhost:5432/mydatabase-True]": 0.0008826660050544888, - "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[postgresql://user:pass@localhost/dbname-True]": 0.0007817089790478349, - "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[sqlite+aiosqlite:////var/folders/test.db-True]": 0.0008707080269232392, - "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[sqlite:////var/folders/test.db-True]": 0.0008320000197272748, - 
"src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[sqlite:///:memory:-True]": 0.000843376008560881, - "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[sqlite:///test.db-True]": 0.0008358330233022571 + "src/backend/tests/unit/utils/test_image_utils.py::test_convert_image_to_base64_directory": 0.0013072770000235323, + "src/backend/tests/unit/utils/test_image_utils.py::test_convert_image_to_base64_empty_path": 0.0009359950001908146, + "src/backend/tests/unit/utils/test_image_utils.py::test_convert_image_to_base64_nonexistent_file": 0.0009410469999693305, + "src/backend/tests/unit/utils/test_image_utils.py::test_convert_image_to_base64_success": 0.0014837069999202868, + "src/backend/tests/unit/utils/test_image_utils.py::test_create_data_url_invalid_file": 0.0010920059999079967, + "src/backend/tests/unit/utils/test_image_utils.py::test_create_data_url_success": 0.0014452860000346845, + "src/backend/tests/unit/utils/test_image_utils.py::test_create_data_url_unrecognized_extension": 0.0013994400000001406, + "src/backend/tests/unit/utils/test_image_utils.py::test_create_data_url_with_custom_mime": 0.0014195679999602362, + "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[-]": 0.0008879460001480766, + "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[/home/user/\\ndocu\\nments/file.txt-/home/user/\\\\ndocu\\\\nments/file.txt]": 0.0008834590000788012, + "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[/home/user/docu\\n\\nments/file.txt-/home/user/docu\\\\n\\\\nments/file.txt]": 0.0008764039998823137, + "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[/home/user/docu\\nments/file.txt-/home/user/docu\\\\nments/file.txt]": 0.0008741399999507848, + "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[/home/user/documents/\\n-/home/user/documents/\\\\n]": 0.0009025730000757903, + "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[/home/user/documents/file.txt-/home/user/documents/file.txt]": 0.0009005590001152086, + "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[/home/user/my-\\ndocs/special_file!.pdf-/home/user/my-\\\\ndocs/special_file!.pdf]": 0.0008906400001933434, + "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[C:\\\\Users\\\\\\nDocuments\\\\file.txt-C:\\\\Users\\\\\\\\nDocuments\\\\file.txt]": 0.0008802810001498074, + "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[\\n/home/user/documents/-\\\\n/home/user/documents/]": 0.0008601629998565841, + "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path[\\n\\n\\n-\\\\n\\\\n\\\\n]": 0.0008873940000739822, + "src/backend/tests/unit/utils/test_rewrite_file_path.py::test_format_directory_path_type": 0.0008076860000301167, + "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_negative_max_length": 0.0008210230000713636, + "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[-5-]": 0.0009304650001240589, + "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[12345-3-12345]": 0.0009258969998882094, + 
"src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[3.141592653589793-4-3.141592653589793]": 0.0009098069999708969, + "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[None-5-None]": 0.0008924940000269999, + "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[True-2-True]": 0.0009243030000334329, + "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[\\u3053\\u3093\\u306b\\u3061\\u306f-3-\\u3053\\u3093\\u306b...]": 0.0009383010000192371, + "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[a-1-a]": 0.0009198560001095757, + "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-10-aaaaaaaaaa...]": 0.0009241239999937534, + "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[exact-5-exact]": 0.0009116320001112399, + "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[long string-7-long st...]": 0.001096144999905846, + "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_non_dict_list[short string-20-short string]": 0.0009111390000953179, + "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_none_max_length": 0.000857851000091614, + "src/backend/tests/unit/utils/test_truncate_long_strings.py::test_truncate_long_strings_zero_max_length": 0.0007898119999936171, + "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data0-10-expected0]": 0.0009220710001045518, + "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data1-5-expected1]": 0.0009067209999784609, + "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data2-7-expected2]": 0.0009148350001169092, + "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data3-8-expected3]": 0.0010652869999603354, + "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data4-10-expected4]": 0.0009185730000353942, + "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data5-10-expected5]": 0.0009151070000825712, 
+ "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data6-10-expected6]": 0.0009297750000314409, + "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data7-5-expected7]": 0.000896893000003729, + "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data8-3-expected8]": 0.0009559430000081193, + "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings[input_data9-10-expected9]": 0.0014203389999920546, + "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings_default_max_length": 0.0008467589999554548, + "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings_in_place_modification": 0.000815822000049593, + "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings_invalid_input": 0.0008113019999882454, + "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings_negative_max_length": 0.0007971659999839176, + "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings_no_modification": 0.0008168220000470683, + "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings_small_max_length": 0.0007961850001265702, + "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings_type_preservation": 0.0008197779999363775, + "src/backend/tests/unit/utils/test_truncate_long_strings_on_objects.py::test_truncate_long_strings_zero_max_length": 0.0008255600000666163, + "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[ invalid -False]": 0.0009044370000310664, + "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[-False]": 0.00091371500002424, + "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[None-False]": 0.0008920449998868207, + "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[invalid://:@/test-False]": 0.02596106199996484, + "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[invalid://database-False]": 0.02800130800005718, + "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[mysql+mysqldb://scott:tiger@localhost/foo-True]": 0.0011573979999184303, + "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[mysql+pymysql://scott:tiger@localhost/foo-True]": 0.0010323249999828477, + "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[mysql://user:pass@localhost/dbname-True]": 0.04878292000012152, + "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[not_a_url-False]": 0.0009126409998998497, + "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[oracle+cx_oracle://scott:tiger@tnsalias-True]": 0.001036403000057362, + "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[oracle+oracledb://scott:tiger@127.0.0.1:1521/?service_name=freepdb1-True]": 0.0009899980000227515, + "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[oracle://scott:tiger@127.0.0.1:1521/?service_name=freepdb1-True]": 0.045242965999932494, + 
"src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[postgresql+pg8000://dbuser:kx%40jj5%2Fg@pghost10/appdb-True]": 0.0012391609998303466, + "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[postgresql+psycopg2://scott:tiger@localhost:5432/mydatabase-True]": 0.0009334610000450994, + "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[postgresql://user:pass@localhost/dbname-True]": 0.0009484690000363116, + "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[sqlite+aiosqlite:////var/folders/test.db-True]": 0.0009028029999171849, + "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[sqlite:////var/folders/test.db-True]": 0.0009348020000743418, + "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[sqlite:///:memory:-True]": 0.0008976439999059949, + "src/backend/tests/unit/utils/test_util_strings.py::test_is_valid_database_url[sqlite:///test.db-True]": 0.000948028000038903 } \ No newline at end of file From 99f2ef6115ed159468acf323221995f000ee7b93 Mon Sep 17 00:00:00 2001 From: Gabriel Luiz Freitas Almeida Date: Thu, 16 Jan 2025 15:39:34 -0300 Subject: [PATCH 13/22] feat: add truncation to ResultDataResponse (#5704) * chore: Update dependencies and improve platform markers in configuration files - Added 'hypothesis' version 6.123.17 to dev-dependencies in pyproject.toml. - Updated platform markers from 'sys_platform' to 'platform_system' for better compatibility in uv.lock, affecting multiple packages including 'jinxed', 'colorama', and 'appnope'. - Ensured consistency in platform checks across various dependencies to enhance cross-platform support. This update improves the project's dependency management and ensures better compatibility across different operating systems. * feat: Enhance ResultDataResponse serialization with truncation support - Introduced a new method `_serialize_and_truncate` to handle serialization and truncation of various data types, including strings, bytes, datetime, Decimal, UUID, and BaseModel instances. - Updated the `serialize_results` method to utilize the new truncation logic for both individual results and dictionary outputs. - Enhanced the `serialize_model` method to ensure all relevant fields are serialized and truncated according to the defined maximum text length. This update improves the handling of large data outputs, ensuring that responses remain concise and manageable. * fix: Reduce MAX_TEXT_LENGTH in constants.py from 99999 to 20000 This change lowers the maximum text length limit to improve data handling and ensure more manageable output sizes across the application. * test: Add comprehensive unit tests for ResultDataResponse and VertexBuildResponse - Introduced a new test suite in `test_api_schemas.py` to validate the serialization and truncation behavior of `ResultDataResponse` and `VertexBuildResponse`. - Implemented tests for handling long strings, special data types, nested structures, and combined fields, ensuring proper serialization and truncation. - Enhanced coverage for logging and output handling, verifying that all fields are correctly processed and truncated as per the defined maximum text length. - Utilized Hypothesis for property-based testing to ensure robustness and reliability of the serialization logic. This update significantly improves the test coverage for the API response schemas, ensuring better data handling and output management. 
--- pyproject.toml | 1 + src/backend/base/langflow/api/v1/schemas.py | 72 +++- src/backend/base/langflow/utils/constants.py | 2 +- .../tests/unit/api/v1/test_api_schemas.py | 316 ++++++++++++++++++ uv.lock | 52 ++- 5 files changed, 413 insertions(+), 30 deletions(-) create mode 100644 src/backend/tests/unit/api/v1/test_api_schemas.py diff --git a/pyproject.toml b/pyproject.toml index 674f8f51056b..929187ae19ec 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -186,6 +186,7 @@ dev-dependencies = [ "blockbuster>=1.5.8,<1.6", "types-aiofiles>=24.1.0.20240626", "codeflash>=0.8.4", + "hypothesis>=6.123.17", ] diff --git a/src/backend/base/langflow/api/v1/schemas.py b/src/backend/base/langflow/api/v1/schemas.py index 20ab9c668a2e..96b21e3e3b30 100644 --- a/src/backend/base/langflow/api/v1/schemas.py +++ b/src/backend/base/langflow/api/v1/schemas.py @@ -1,20 +1,13 @@ from datetime import datetime, timezone +from decimal import Decimal from enum import Enum from pathlib import Path from typing import Any from uuid import UUID -from pydantic import ( - BaseModel, - ConfigDict, - Field, - field_serializer, - field_validator, - model_serializer, -) +from pydantic import BaseModel, ConfigDict, Field, field_serializer, field_validator, model_serializer from langflow.graph.schema import RunOutputs -from langflow.graph.utils import serialize_field from langflow.schema import dotdict from langflow.schema.graph import Tweaks from langflow.schema.schema import InputType, OutputType, OutputValue @@ -24,6 +17,7 @@ from langflow.services.database.models.user import UserRead from langflow.services.settings.feature_flags import FeatureFlags from langflow.services.tracing.schema import Log +from langflow.utils.constants import MAX_TEXT_LENGTH from langflow.utils.util_strings import truncate_long_strings @@ -275,9 +269,65 @@ class ResultDataResponse(BaseModel): @field_serializer("results") @classmethod def serialize_results(cls, v): + """Serialize results with custom handling for special types and truncation.""" if isinstance(v, dict): - return {key: serialize_field(val) for key, val in v.items()} - return serialize_field(v) + return {key: cls._serialize_and_truncate(val, max_length=MAX_TEXT_LENGTH) for key, val in v.items()} + return cls._serialize_and_truncate(v, max_length=MAX_TEXT_LENGTH) + + @staticmethod + def _serialize_and_truncate(obj: Any, max_length: int = MAX_TEXT_LENGTH) -> Any: + """Helper method to serialize and truncate values.""" + if isinstance(obj, bytes): + obj = obj.decode("utf-8", errors="ignore") + if len(obj) > max_length: + return f"{obj[:max_length]}... [truncated]" + return obj + if isinstance(obj, str): + if len(obj) > max_length: + return f"{obj[:max_length]}... [truncated]" + return obj + if isinstance(obj, datetime): + return obj.astimezone().isoformat() + if isinstance(obj, Decimal): + return float(obj) + if isinstance(obj, UUID): + return str(obj) + if isinstance(obj, OutputValue | Log): + # First serialize the model + serialized = obj.model_dump() + # Then recursively truncate all values in the serialized dict + for key, value in serialized.items(): + # Handle string values directly to ensure proper truncation + if isinstance(value, str) and len(value) > max_length: + serialized[key] = f"{value[:max_length]}... 
[truncated]" + else: + serialized[key] = ResultDataResponse._serialize_and_truncate(value, max_length=max_length) + return serialized + if isinstance(obj, BaseModel): + # For other BaseModel instances, serialize all fields + serialized = obj.model_dump() + return { + k: ResultDataResponse._serialize_and_truncate(v, max_length=max_length) for k, v in serialized.items() + } + if isinstance(obj, dict): + return {k: ResultDataResponse._serialize_and_truncate(v, max_length=max_length) for k, v in obj.items()} + if isinstance(obj, list | tuple): + return [ResultDataResponse._serialize_and_truncate(item, max_length=max_length) for item in obj] + return obj + + @model_serializer(mode="plain") + def serialize_model(self) -> dict: + """Custom serializer for the entire model.""" + return { + "results": self.serialize_results(self.results), + "outputs": self._serialize_and_truncate(self.outputs, max_length=MAX_TEXT_LENGTH), + "logs": self._serialize_and_truncate(self.logs, max_length=MAX_TEXT_LENGTH), + "message": self._serialize_and_truncate(self.message, max_length=MAX_TEXT_LENGTH), + "artifacts": self._serialize_and_truncate(self.artifacts, max_length=MAX_TEXT_LENGTH), + "timedelta": self.timedelta, + "duration": self.duration, + "used_frozen_result": self.used_frozen_result, + } class VertexBuildResponse(BaseModel): diff --git a/src/backend/base/langflow/utils/constants.py b/src/backend/base/langflow/utils/constants.py index 1b720652adc7..21e39fcc758b 100644 --- a/src/backend/base/langflow/utils/constants.py +++ b/src/backend/base/langflow/utils/constants.py @@ -185,4 +185,4 @@ def python_function(text: str) -> str: MESSAGE_SENDER_NAME_AI = "AI" MESSAGE_SENDER_NAME_USER = "User" -MAX_TEXT_LENGTH = 99999 +MAX_TEXT_LENGTH = 20000 diff --git a/src/backend/tests/unit/api/v1/test_api_schemas.py b/src/backend/tests/unit/api/v1/test_api_schemas.py new file mode 100644 index 000000000000..26844eb6ccd5 --- /dev/null +++ b/src/backend/tests/unit/api/v1/test_api_schemas.py @@ -0,0 +1,316 @@ +from datetime import datetime, timezone + +from hypothesis import HealthCheck, example, given, settings +from hypothesis import strategies as st +from langflow.api.v1.schemas import ResultDataResponse, VertexBuildResponse +from langflow.schema.schema import OutputValue +from langflow.services.tracing.schema import Log +from pydantic import BaseModel + +# Use a smaller test size for hypothesis +TEST_TEXT_LENGTH = 50 + + +class SampleBaseModel(BaseModel): + name: str + value: int + + +@given(st.text(min_size=TEST_TEXT_LENGTH + 1, max_size=TEST_TEXT_LENGTH * 2)) +@settings(max_examples=10) +def test_result_data_response_truncation(long_string): + """Test that ResultDataResponse properly truncates long strings.""" + response = ResultDataResponse( + results={"long_text": long_string}, + message={"text": long_string}, + ) + + response.serialize_model() + truncated = response._serialize_and_truncate(long_string, max_length=TEST_TEXT_LENGTH) + assert len(truncated) <= TEST_TEXT_LENGTH + len("... [truncated]") + assert "... 
[truncated]" in truncated + + +@given( + st.uuids(), + st.datetimes(timezones=st.just(timezone.utc)), + st.decimals(min_value="-1e6", max_value="1e6"), + st.text(min_size=1), + st.integers(), +) +@settings(max_examples=10) +def test_result_data_response_special_types(uuid, dt, decimal, name, value): + """Test that ResultDataResponse properly handles special data types.""" + test_model = SampleBaseModel(name=name, value=value) + + response = ResultDataResponse( + results={ + "uuid": uuid, + "datetime": dt, + "decimal": decimal, + "model": test_model, + } + ) + + serialized = response.serialize_model() + assert serialized["results"]["uuid"] == str(uuid) + # Compare timezone-aware datetimes + assert datetime.fromisoformat(serialized["results"]["datetime"]).astimezone(timezone.utc) == dt + assert isinstance(serialized["results"]["decimal"], float) + assert serialized["results"]["model"] == {"name": name, "value": value} + + +@given( + st.lists(st.text(min_size=TEST_TEXT_LENGTH + 1, max_size=TEST_TEXT_LENGTH * 2), min_size=1, max_size=2), + st.dictionaries( + keys=st.text(min_size=1, max_size=10), + values=st.text(min_size=TEST_TEXT_LENGTH + 1, max_size=TEST_TEXT_LENGTH * 2), + min_size=1, + max_size=2, + ), +) +@settings(max_examples=5, suppress_health_check=[HealthCheck.too_slow, HealthCheck.large_base_example]) +def test_result_data_response_nested_structures(long_list, long_dict): + """Test that ResultDataResponse handles nested structures correctly.""" + nested_data = { + "list": long_list, + "dict": long_dict, + } + + response = ResultDataResponse(results=nested_data) + serialized = response._serialize_and_truncate(nested_data, max_length=TEST_TEXT_LENGTH) + + # Check list items + for item in serialized["list"]: + assert len(item) <= TEST_TEXT_LENGTH + len("... [truncated]") + if len(item) > TEST_TEXT_LENGTH: + assert "... [truncated]" in item + + # Check dict values + for val in serialized["dict"].values(): + assert len(val) <= TEST_TEXT_LENGTH + len("... [truncated]") + if len(val) > TEST_TEXT_LENGTH: + assert "... [truncated]" in val + + +@given( + st.dictionaries( + keys=st.text(min_size=1, max_size=5), + values=st.text(min_size=TEST_TEXT_LENGTH + 1, max_size=TEST_TEXT_LENGTH * 2), + min_size=1, + max_size=2, + ), +) +@settings(max_examples=10) +@example( + outputs_dict={"0": "000000000000000000000000000000000000000000000000000"}, +).via("discovered failure") +@example( + outputs_dict={"0": "000000000000000000000000000000000000000000000000000000000000000000"}, +).via("discovered failure") +def test_result_data_response_outputs(outputs_dict): + """Test that ResultDataResponse properly handles and truncates outputs.""" + # Create OutputValue objects with potentially long messages + outputs = {key: OutputValue(type="text", message=value) for key, value in outputs_dict.items()} + + response = ResultDataResponse(outputs=outputs) + serialized = ResultDataResponse._serialize_and_truncate(response, max_length=TEST_TEXT_LENGTH) + + # Check outputs are properly serialized and truncated + for key, value in outputs_dict.items(): + assert key in serialized["outputs"] + serialized_output = serialized["outputs"][key] + assert serialized_output["type"] == "text" + + # Check message truncation + message = serialized_output["message"] + assert len(message) <= TEST_TEXT_LENGTH + len("... [truncated]"), f"Message length: {len(message)}" + if len(value) > TEST_TEXT_LENGTH: + assert "... 
[truncated]" in message + assert message.startswith(value[:TEST_TEXT_LENGTH]) + else: + assert message == value + + +@given( + st.lists( + st.text(min_size=TEST_TEXT_LENGTH + 1, max_size=TEST_TEXT_LENGTH * 2), + min_size=1, + max_size=3, + ), +) +@settings(max_examples=10) +@example( + log_messages=["000000000000000000000000000000000000000000000000000"], +).via("discovered failure") +def test_result_data_response_logs(log_messages): + """Test that ResultDataResponse properly handles and truncates logs.""" + # Create logs with long messages + logs = { + "test_node": [ + Log( + message=msg, + name="test_log", + type="test", + ) + for msg in log_messages + ] + } + + response = ResultDataResponse(logs=logs) + serialized = ResultDataResponse._serialize_and_truncate(response, max_length=TEST_TEXT_LENGTH) + + # Check logs are properly serialized and truncated + assert "test_node" in serialized["logs"] + serialized_logs = serialized["logs"]["test_node"] + + for i, log_msg in enumerate(log_messages): + serialized_log = serialized_logs[i] + assert serialized_log["name"] == "test_log" + assert serialized_log["type"] == "test" + + # Check message truncation + message = serialized_log["message"] + assert len(message) <= TEST_TEXT_LENGTH + len("... [truncated]") + if len(log_msg) > TEST_TEXT_LENGTH: + assert "... [truncated]" in message + assert message.startswith(log_msg[:TEST_TEXT_LENGTH]) + else: + assert message == log_msg + + +@given( + st.dictionaries( + keys=st.text(min_size=1, max_size=5), + values=st.text(min_size=TEST_TEXT_LENGTH + 1, max_size=TEST_TEXT_LENGTH * 2), + min_size=1, + max_size=2, + ), + st.lists( + st.text(min_size=TEST_TEXT_LENGTH + 1, max_size=TEST_TEXT_LENGTH * 2), + min_size=1, + max_size=3, + ), +) +@settings(max_examples=10) +@example( + outputs_dict={"0": "000000000000000000000000000000000000000000000000000000000000000000"}, + log_messages=["000000000000000000000000000000000000000000000000000"], +).via("discovered failure") +@example( + outputs_dict={"0": "000000000000000000000000000000000000000000000000000"}, + log_messages=["000000000000000000000000000000000000000000000000000"], +).via("discovered failure") +def test_result_data_response_combined_fields(outputs_dict, log_messages): + """Test that ResultDataResponse properly handles all fields together.""" + # Create OutputValue objects with potentially long messages + outputs = {key: OutputValue(type="text", message=value) for key, value in outputs_dict.items()} + + # Create logs with long messages + logs = { + "test_node": [ + Log( + message=msg, + name="test_log", + type="test", + ) + for msg in log_messages + ] + } + + response = ResultDataResponse( + outputs=outputs, + logs=logs, + results={"test": "value"}, + message={"text": "test"}, + artifacts={"file": "test.txt"}, + ) + serialized = ResultDataResponse._serialize_and_truncate(response, max_length=TEST_TEXT_LENGTH) + + # Check all fields are present + assert "outputs" in serialized + assert "logs" in serialized + assert "results" in serialized + assert "message" in serialized + assert "artifacts" in serialized + + # Check outputs truncation + for key, value in outputs_dict.items(): + assert key in serialized["outputs"] + serialized_output = serialized["outputs"][key] + assert serialized_output["type"] == "text" + + # Check message truncation + message = serialized_output["message"] + if len(value) > TEST_TEXT_LENGTH: + assert len(message) <= TEST_TEXT_LENGTH + len("... [truncated]") + assert "... 
[truncated]" in message + else: + assert message == value + + # Check logs truncation + assert "test_node" in serialized["logs"] + serialized_logs = serialized["logs"]["test_node"] + + for i, log_msg in enumerate(log_messages): + serialized_log = serialized_logs[i] + assert serialized_log["name"] == "test_log" + assert serialized_log["type"] == "test" + + # Check message truncation + message = serialized_log["message"] + if len(log_msg) > TEST_TEXT_LENGTH: + assert len(message) <= TEST_TEXT_LENGTH + len("... [truncated]") + assert "... [truncated]" in message + else: + assert message == log_msg + + +@given( + st.text(min_size=1), # build_id + st.lists(st.text()), # logs + st.text(min_size=1), # message +) +@settings(max_examples=10) +def test_vertex_build_response_serialization(build_id, log_messages, test_message): + """Test that VertexBuildResponse properly serializes its data field.""" + logs = [Log(message=msg, name="test_log", type="test") for msg in log_messages] + + result_data = ResultDataResponse( + results={"test": test_message}, + message={"text": test_message}, + logs={"node1": logs}, + ) + + response = VertexBuildResponse( + id=build_id, + valid=True, + data=result_data, + ) + + serialized = response.model_dump() + assert serialized["id"] == build_id + assert serialized["valid"] is True + assert isinstance(serialized["data"], dict) + assert serialized["data"]["results"]["test"] == test_message + + +@given(st.text(min_size=TEST_TEXT_LENGTH + 1, max_size=TEST_TEXT_LENGTH * 2)) +@settings(max_examples=10) +def test_vertex_build_response_with_long_data(long_string): + """Test that VertexBuildResponse properly handles long data in its data field.""" + result_data = ResultDataResponse( + results={"long_text": long_string}, + message={"text": long_string}, + ) + + response = VertexBuildResponse( + id="test-id", + valid=True, + data=result_data, + ) + + response.model_dump() + truncated = result_data._serialize_and_truncate(long_string, max_length=TEST_TEXT_LENGTH) + assert len(truncated) <= TEST_TEXT_LENGTH + len("... [truncated]") + assert "... 
[truncated]" in truncated diff --git a/uv.lock b/uv.lock index 8d4a487c1c2c..d50afab6e1a4 100644 --- a/uv.lock +++ b/uv.lock @@ -532,7 +532,7 @@ name = "blessed" version = "1.20.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "jinxed", marker = "sys_platform == 'win32'" }, + { name = "jinxed", marker = "platform_system == 'Windows'" }, { name = "six" }, { name = "wcwidth" }, ] @@ -954,7 +954,7 @@ name = "click" version = "8.1.8" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "colorama", marker = "platform_system == 'Windows'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593 } wheels = [ @@ -3000,6 +3000,20 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d7/de/85a784bcc4a3779d1753a7ec2dee5de90e18c7bcf402e71b51fcf150b129/hyperframe-6.0.1-py3-none-any.whl", hash = "sha256:0ec6bafd80d8ad2195c4f03aacba3a8265e57bc4cff261e802bf39970ed02a15", size = 12389 }, ] +[[package]] +name = "hypothesis" +version = "6.123.17" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "sortedcontainers" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/15/a7/695b2bcb4e8438e1d4683efa6877fc95be293a11251471d4552d6dd08259/hypothesis-6.123.17.tar.gz", hash = "sha256:5850893975b4f08e893ddc10f1d468bc7e011d59703f70fe06a10161e426e602", size = 418572 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/8a/f1c166f048df4b314d0d38e9530b7af516a16160873d724bb416084d6990/hypothesis-6.123.17-py3-none-any.whl", hash = "sha256:5c949fb44935e32c61c64abfcc3975eec41f8205ade2223073ba074c1e078ead", size = 480880 }, +] + [[package]] name = "identify" version = "2.6.3" @@ -3142,7 +3156,7 @@ name = "ipykernel" version = "6.29.5" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "appnope", marker = "sys_platform == 'darwin'" }, + { name = "appnope", marker = "platform_system == 'Darwin'" }, { name = "comm" }, { name = "debugpy" }, { name = "ipython" }, @@ -3233,7 +3247,7 @@ name = "jinxed" version = "1.3.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "ansicon", marker = "sys_platform == 'win32'" }, + { name = "ansicon", marker = "platform_system == 'Windows'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/20/d0/59b2b80e7a52d255f9e0ad040d2e826342d05580c4b1d7d7747cfb8db731/jinxed-1.3.0.tar.gz", hash = "sha256:1593124b18a41b7a3da3b078471442e51dbad3d77b4d4f2b0c26ab6f7d660dbf", size = 80981 } wheels = [ @@ -4040,6 +4054,7 @@ dev = [ { name = "codeflash" }, { name = "dictdiffer" }, { name = "httpx" }, + { name = "hypothesis" }, { name = "ipykernel" }, { name = "mypy" }, { name = "packaging" }, @@ -4182,6 +4197,7 @@ dev = [ { name = "codeflash", specifier = ">=0.8.4" }, { name = "dictdiffer", specifier = ">=0.9.0" }, { name = "httpx", specifier = ">=0.27.0" }, + { name = "hypothesis", specifier = ">=6.123.17" }, { name = "ipykernel", specifier = ">=6.29.0" }, { name = "mypy", specifier = ">=1.11.0" }, { name = "packaging", specifier = ">=24.1,<25.0" }, @@ -6148,7 +6164,7 @@ name = "portalocker" version = "2.10.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = 
"pywin32", marker = "sys_platform == 'win32'" }, + { name = "pywin32", marker = "platform_system == 'Windows'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/ed/d3/c6c64067759e87af98cc668c1cc75171347d0f1577fab7ca3749134e3cd4/portalocker-2.10.1.tar.gz", hash = "sha256:ef1bf844e878ab08aee7e40184156e1151f228f103aa5c6bd0724cc330960f8f", size = 40891 } wheels = [ @@ -8645,19 +8661,19 @@ dependencies = [ { name = "fsspec" }, { name = "jinja2" }, { name = "networkx" }, - { name = "nvidia-cublas-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cuda-cupti-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cuda-nvrtc-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cuda-runtime-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cudnn-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cufft-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-curand-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cusolver-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cusparse-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-nccl-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-nvtx-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cublas-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, + { name = "nvidia-cuda-cupti-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, + { name = "nvidia-cuda-nvrtc-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, + { name = "nvidia-cuda-runtime-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, + { name = "nvidia-cudnn-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, + { name = "nvidia-cufft-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, + { name = "nvidia-curand-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, + { name = "nvidia-cusolver-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, + { name = "nvidia-cusparse-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, + { name = "nvidia-nccl-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, + { name = "nvidia-nvtx-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, { name = "sympy" }, - { name = "triton", marker = "python_full_version < '3.13' and platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "triton", marker = "python_full_version < '3.13' and platform_machine == 'x86_64' and platform_system == 'Linux'" }, { name = "typing-extensions" }, ] wheels = [ @@ -8698,7 +8714,7 @@ name = "tqdm" version = "4.67.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "colorama", marker = "platform_system == 'Windows'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = 
"sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737 } wheels = [ From c39bb39772373601f3b4f127432be3b1eb5298d2 Mon Sep 17 00:00:00 2001 From: Cristhian Zanforlin Lousa Date: Thu, 16 Jan 2025 15:41:17 -0300 Subject: [PATCH 14/22] fix: pass slider input values correctly, add test (#5735) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ✨ (base.py): Update field validation to include "slider" type in addition to "float" type for better parameter handling 📝 (constants.py): Add "slider" type to the list of DIRECT_TYPES for consistency and completeness * ✅ (test_inputs.py): add unit test for SliderInput class to ensure it initializes with correct value * 🐛 (base.py): fix comparison of field type with a list by changing it to a set to ensure correct condition evaluation * [autofix.ci] apply automated fixes * fix format * [autofix.ci] apply automated fixes --------- Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- src/backend/base/langflow/graph/vertex/base.py | 2 +- src/backend/base/langflow/utils/constants.py | 13 +------------ src/backend/tests/unit/inputs/test_inputs.py | 6 ++++++ 3 files changed, 8 insertions(+), 13 deletions(-) diff --git a/src/backend/base/langflow/graph/vertex/base.py b/src/backend/base/langflow/graph/vertex/base.py index bda54255b038..05177d111b81 100644 --- a/src/backend/base/langflow/graph/vertex/base.py +++ b/src/backend/base/langflow/graph/vertex/base.py @@ -395,7 +395,7 @@ def build_params(self) -> None: params[field_name] = int(val) except ValueError: params[field_name] = val - elif field.get("type") == "float" and val is not None: + elif field.get("type") in {"float", "slider"} and val is not None: try: params[field_name] = float(val) except ValueError: diff --git a/src/backend/base/langflow/utils/constants.py b/src/backend/base/langflow/utils/constants.py index 21e39fcc758b..8346019aaa70 100644 --- a/src/backend/base/langflow/utils/constants.py +++ b/src/backend/base/langflow/utils/constants.py @@ -52,18 +52,7 @@ def python_function(text: str) -> str: PYTHON_BASIC_TYPES = [str, bool, int, float, tuple, list, dict, set] -DIRECT_TYPES = [ - "str", - "bool", - "dict", - "int", - "float", - "Any", - "prompt", - "code", - "NestedDict", - "table", -] +DIRECT_TYPES = ["str", "bool", "dict", "int", "float", "Any", "prompt", "code", "NestedDict", "table", "slider"] LOADERS_INFO: list[dict[str, Any]] = [ diff --git a/src/backend/tests/unit/inputs/test_inputs.py b/src/backend/tests/unit/inputs/test_inputs.py index 8d4386bcb738..2a946a2a91b1 100644 --- a/src/backend/tests/unit/inputs/test_inputs.py +++ b/src/backend/tests/unit/inputs/test_inputs.py @@ -17,6 +17,7 @@ NestedDictInput, PromptInput, SecretStrInput, + SliderInput, StrInput, TableInput, ) @@ -30,6 +31,11 @@ def test_table_input_valid(): assert data.value == [{"key": "value"}, {"key2": "value2"}] +def test_slider_input_valid(): + data = SliderInput(name="valid_slider", value=10) + assert data.value == 10 + + def test_table_input_invalid(): with pytest.raises(ValidationError): TableInput(name="invalid_table", value="invalid") From 39ef9ba1f9e81db69f259a28b5014b13c247279f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vin=C3=ADcios=20Batista=20da=20Silva?= Date: Thu, 16 Jan 2025 15:41:22 -0300 Subject: [PATCH 15/22] feat: make AWS credentials required in bedrock component (#5710) 1. Make aws_access_key_id field required 2. 
Make aws_secret_access_key field required --- src/backend/base/langflow/components/models/amazon_bedrock.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/backend/base/langflow/components/models/amazon_bedrock.py b/src/backend/base/langflow/components/models/amazon_bedrock.py index de41558996bb..fa294c94f791 100644 --- a/src/backend/base/langflow/components/models/amazon_bedrock.py +++ b/src/backend/base/langflow/components/models/amazon_bedrock.py @@ -26,6 +26,7 @@ class AmazonBedrockComponent(LCModelComponent): info="The access key for your AWS account." "Usually set in Python code as the environment variable 'AWS_ACCESS_KEY_ID'.", value="AWS_ACCESS_KEY_ID", + required=True, ), SecretStrInput( name="aws_secret_access_key", @@ -33,6 +34,7 @@ class AmazonBedrockComponent(LCModelComponent): info="The secret key for your AWS account. " "Usually set in Python code as the environment variable 'AWS_SECRET_ACCESS_KEY'.", value="AWS_SECRET_ACCESS_KEY", + required=True, ), SecretStrInput( name="aws_session_token", From 778b74dfa8019a4735470593901412666f3bbe91 Mon Sep 17 00:00:00 2001 From: Edwin Jose Date: Thu, 16 Jan 2025 14:11:41 -0500 Subject: [PATCH 16/22] feat: Add function to validate models with tool calling function and related fixes in agent component (#5720) * Update nvidia.py * update agent experience with improving model selection update agent experience with improving model selection and making only the tool calling models available. * variable clean up * [autofix.ci] apply automated fixes * Update src/backend/base/langflow/base/models/model_input_constants.py Co-authored-by: Gabriel Luiz Freitas Almeida * Update src/backend/base/langflow/base/models/model_input_constants.py Co-authored-by: Gabriel Luiz Freitas Almeida * added default models * [autofix.ci] apply automated fixes * [autofix.ci] apply automated fixes (attempt 2/3) * format errors solved * [autofix.ci] apply automated fixes * Update model.py --------- Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> Co-authored-by: Gabriel Luiz Freitas Almeida --- .../base/langflow/base/models/model.py | 15 +++ .../base/models/model_input_constants.py | 10 +- .../base/langflow/components/agents/agent.py | 21 +++- .../langflow/components/models/anthropic.py | 106 +++++++++++++---- .../base/langflow/components/models/groq.py | 107 ++++++++++++------ .../base/langflow/components/models/nvidia.py | 29 +++-- .../Custom Component Maker.json | 66 +++++++---- .../Instagram Copywriter.json | 2 +- .../starter_projects/Market Research.json | 2 +- .../starter_projects/Research Agent.json | 2 +- .../starter_projects/SaaS Pricing.json | 2 +- .../Sequential Tasks Agents .json | 6 +- .../starter_projects/Simple Agent.json | 2 +- .../Travel Planning Agents.json | 6 +- 14 files changed, 270 insertions(+), 106 deletions(-) diff --git a/src/backend/base/langflow/base/models/model.py b/src/backend/base/langflow/base/models/model.py index b9afe335ac91..a6bfdff05a41 100644 --- a/src/backend/base/langflow/base/models/model.py +++ b/src/backend/base/langflow/base/models/model.py @@ -3,6 +3,7 @@ import warnings from abc import abstractmethod +from langchain_core.language_models import BaseChatModel from langchain_core.language_models.llms import LLM from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage from langchain_core.output_parsers import BaseOutputParser @@ -43,6 +44,20 @@ class LCModelComponent(Component): def _get_exception_message(self, e: Exception): return str(e) + def 
supports_tool_calling(self, model: LanguageModel) -> bool: + try: + # Check if the bind_tools method is the same as the base class's method + if model.bind_tools is BaseChatModel.bind_tools: + return False + + def test_tool(x: int) -> int: + return x + + model_with_tool = model.bind_tools([test_tool]) + return hasattr(model_with_tool, "tools") and len(model_with_tool.tools) > 0 + except (AttributeError, TypeError, ValueError): + return False + def _validate_outputs(self) -> None: # At least these two outputs must be defined required_output_methods = ["text_response", "build_model"] diff --git a/src/backend/base/langflow/base/models/model_input_constants.py b/src/backend/base/langflow/base/models/model_input_constants.py index dcb7bbcd6094..b11eb91c825a 100644 --- a/src/backend/base/langflow/base/models/model_input_constants.py +++ b/src/backend/base/langflow/base/models/model_input_constants.py @@ -28,7 +28,7 @@ def process_inputs(component_data): if isinstance(component_data, SecretStrInput): component_data.value = "" component_data.load_from_db = False - elif component_data.name == "temperature": + elif component_data.name in {"temperature", "tool_model_enabled", "base_url"}: component_data = set_advanced_true(component_data) return component_data @@ -180,3 +180,11 @@ def _get_amazon_bedrock_inputs_and_fields(): MODEL_PROVIDERS = list(MODEL_PROVIDERS_DICT.keys()) ALL_PROVIDER_FIELDS: list[str] = [field for provider in MODEL_PROVIDERS_DICT.values() for field in provider["fields"]] + +MODEL_DYNAMIC_UPDATE_FIELDS = [ + "api_key", + "model", + "tool_model_enabled", + "base_url", + "model_name", +] diff --git a/src/backend/base/langflow/components/agents/agent.py b/src/backend/base/langflow/components/agents/agent.py index 8705a04a773c..288861eabc74 100644 --- a/src/backend/base/langflow/components/agents/agent.py +++ b/src/backend/base/langflow/components/agents/agent.py @@ -3,6 +3,7 @@ from langflow.base.agents.agent import LCToolsAgentComponent from langflow.base.models.model_input_constants import ( ALL_PROVIDER_FIELDS, + MODEL_DYNAMIC_UPDATE_FIELDS, MODEL_PROVIDERS_DICT, ) from langflow.base.models.model_utils import get_model_name @@ -144,6 +145,16 @@ def _build_llm_model(self, component, inputs, prefix=""): model_kwargs = {input_.name: getattr(self, f"{prefix}{input_.name}") for input_ in inputs} return component.set(**model_kwargs).build_model() + def set_component_params(self, component): + provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm) + if provider_info: + inputs = provider_info.get("inputs") + prefix = provider_info.get("prefix") + model_kwargs = {input_.name: getattr(self, f"{prefix}{input_.name}") for input_ in inputs} + + return component.set(**model_kwargs) + return component + def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None: """Delete specified fields from build_config.""" for field in fields: @@ -164,7 +175,7 @@ async def update_build_config( ) -> dotdict: # Iterate over all providers in the MODEL_PROVIDERS_DICT # Existing logic for updating build_config - if field_name == "agent_llm": + if field_name in ("agent_llm",): provider_info = MODEL_PROVIDERS_DICT.get(field_value) if provider_info: component_class = provider_info.get("component_class") @@ -233,10 +244,15 @@ async def update_build_config( if missing_keys: msg = f"Missing required keys in build_config: {missing_keys}" raise ValueError(msg) - if isinstance(self.agent_llm, str) and self.agent_llm in MODEL_PROVIDERS_DICT: + if ( + isinstance(self.agent_llm, str) + and 
self.agent_llm in MODEL_PROVIDERS_DICT + and field_name in MODEL_DYNAMIC_UPDATE_FIELDS + ): provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm) if provider_info: component_class = provider_info.get("component_class") + component_class = self.set_component_params(component_class) prefix = provider_info.get("prefix") if component_class and hasattr(component_class, "update_build_config"): # Call each component class's update_build_config method @@ -246,5 +262,4 @@ async def update_build_config( build_config = await update_component_build_config( component_class, build_config, field_value, field_name ) - return build_config diff --git a/src/backend/base/langflow/components/models/anthropic.py b/src/backend/base/langflow/components/models/anthropic.py index 66886b7924ce..b1f0a1b243ab 100644 --- a/src/backend/base/langflow/components/models/anthropic.py +++ b/src/backend/base/langflow/components/models/anthropic.py @@ -1,9 +1,14 @@ -from pydantic.v1 import SecretStr +from typing import Any + +import requests +from loguru import logger from langflow.base.models.anthropic_constants import ANTHROPIC_MODELS from langflow.base.models.model import LCModelComponent from langflow.field_typing import LanguageModel -from langflow.io import DropdownInput, FloatInput, IntInput, MessageTextInput, SecretStrInput +from langflow.field_typing.range_spec import RangeSpec +from langflow.io import BoolInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput, SliderInput +from langflow.schema.dotdict import dotdict class AnthropicModelComponent(LCModelComponent): @@ -22,19 +27,42 @@ class AnthropicModelComponent(LCModelComponent): info="The maximum number of tokens to generate. Set to 0 for unlimited tokens.", ), DropdownInput( - name="model", + name="model_name", display_name="Model Name", - options=ANTHROPIC_MODELS, - info="https://python.langchain.com/docs/integrations/chat/anthropic", - value="claude-3-5-sonnet-latest", + options=[], + refresh_button=True, + real_time_refresh=True, + ), + SecretStrInput( + name="api_key", + display_name="Anthropic API Key", + info="Your Anthropic API key.", + value=None, + real_time_refresh=True, + ), + SliderInput( + name="temperature", + display_name="Temperature", + value=0.1, + info="Run inference with this temperature. Must by in the closed interval [0.0, 1.0].", + range_spec=RangeSpec(min=0, max=1, step=0.01), ), - SecretStrInput(name="anthropic_api_key", display_name="Anthropic API Key", info="Your Anthropic API key."), - FloatInput(name="temperature", display_name="Temperature", value=0.1), MessageTextInput( - name="anthropic_api_url", + name="base_url", display_name="Anthropic API URL", - advanced=True, info="Endpoint of the Anthropic API. Defaults to 'https://api.anthropic.com' if not specified.", + value="https://api.anthropic.com", + real_time_refresh=True, + ), + BoolInput( + name="tool_model_enabled", + display_name="Enable Tool Models", + info=( + "Select if you want to use models that can work with tools. If yes, only those models will be shown." + ), + advanced=False, + value=True, + real_time_refresh=True, ), MessageTextInput( name="prefill", display_name="Prefill", info="Prefill text to guide the model's response.", advanced=True @@ -47,19 +75,13 @@ def build_model(self) -> LanguageModel: # type: ignore[type-var] except ImportError as e: msg = "langchain_anthropic is not installed. Please install it with `pip install langchain_anthropic`." 
raise ImportError(msg) from e - model = self.model - anthropic_api_key = self.anthropic_api_key - max_tokens = self.max_tokens - temperature = self.temperature - anthropic_api_url = self.anthropic_api_url or "https://api.anthropic.com" - try: output = ChatAnthropic( - model=model, - anthropic_api_key=(SecretStr(anthropic_api_key).get_secret_value() if anthropic_api_key else None), - max_tokens_to_sample=max_tokens, - temperature=temperature, - anthropic_api_url=anthropic_api_url, + model=self.model_name, + anthropic_api_key=self.api_key, + max_tokens_to_sample=self.max_tokens, + temperature=self.temperature, + anthropic_api_url=self.base_url, streaming=self.stream, ) except Exception as e: @@ -68,6 +90,32 @@ def build_model(self) -> LanguageModel: # type: ignore[type-var] return output + def get_models(self, tool_model_enabled: bool | None = None) -> list[str]: + try: + import anthropic + + client = anthropic.Anthropic(api_key=self.api_key) + models = client.models.list(limit=20).data + model_ids = [model.id for model in models] + except (ImportError, ValueError, requests.exceptions.RequestException) as e: + logger.exception(f"Error getting model names: {e}") + model_ids = ANTHROPIC_MODELS + if tool_model_enabled: + try: + from langchain_anthropic.chat_models import ChatAnthropic + except ImportError as e: + msg = "langchain_anthropic is not installed. Please install it with `pip install langchain_anthropic`." + raise ImportError(msg) from e + for model in model_ids: + model_with_tool = ChatAnthropic( + model=self.model_name, + anthropic_api_key=self.api_key, + anthropic_api_url=self.base_url, + ) + if not self.supports_tool_calling(model_with_tool): + model_ids.remove(model) + return model_ids + def _get_exception_message(self, exception: Exception) -> str | None: """Get a message from an Anthropic exception. 
@@ -86,3 +134,19 @@ def _get_exception_message(self, exception: Exception) -> str | None: if message: return message return None + + def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None): + if field_name in ("base_url", "model_name", "tool_model_enabled", "api_key") and field_value: + try: + if len(self.api_key) != 0: + try: + ids = self.get_models(tool_model_enabled=self.tool_model_enabled) + except (ImportError, ValueError, requests.exceptions.RequestException) as e: + logger.exception(f"Error getting model names: {e}") + ids = ANTHROPIC_MODELS + build_config["model_name"]["options"] = ids + build_config["model_name"]["value"] = ids[0] + except Exception as e: + msg = f"Error getting model names: {e}" + raise ValueError(msg) from e + return build_config diff --git a/src/backend/base/langflow/components/models/groq.py b/src/backend/base/langflow/components/models/groq.py index 010221144886..30fc0c0782a8 100644 --- a/src/backend/base/langflow/components/models/groq.py +++ b/src/backend/base/langflow/components/models/groq.py @@ -1,11 +1,12 @@ import requests +from loguru import logger from pydantic.v1 import SecretStr -from typing_extensions import override from langflow.base.models.groq_constants import GROQ_MODELS from langflow.base.models.model import LCModelComponent from langflow.field_typing import LanguageModel -from langflow.io import DropdownInput, FloatInput, IntInput, MessageTextInput, SecretStrInput +from langflow.field_typing.range_spec import RangeSpec +from langflow.io import BoolInput, DropdownInput, FloatInput, IntInput, MessageTextInput, SecretStrInput, SliderInput class GroqModel(LCModelComponent): @@ -16,13 +17,16 @@ class GroqModel(LCModelComponent): inputs = [ *LCModelComponent._base_inputs, - SecretStrInput(name="groq_api_key", display_name="Groq API Key", info="API key for the Groq API."), + SecretStrInput( + name="api_key", display_name="Groq API Key", info="API key for the Groq API.", real_time_refresh=True + ), MessageTextInput( - name="groq_api_base", + name="base_url", display_name="Groq API Base", info="Base URL path for API requests, leave blank if not using a proxy or service emulator.", advanced=True, value="https://api.groq.com", + real_time_refresh=True, ), IntInput( name="max_tokens", @@ -36,6 +40,13 @@ class GroqModel(LCModelComponent): info="Run inference with this temperature. Must by in the closed interval [0.0, 1.0].", value=0.1, ), + SliderInput( + name="temperature", + display_name="Temperature", + value=0.1, + info="Run inference with this temperature. Must by in the closed interval [0.0, 1.0].", + range_spec=RangeSpec(min=0, max=1, step=0.01), + ), IntInput( name="n", display_name="N", @@ -47,33 +58,65 @@ class GroqModel(LCModelComponent): name="model_name", display_name="Model", info="The name of the model to use.", - options=GROQ_MODELS, - value="llama-3.1-8b-instant", + options=[], refresh_button=True, + real_time_refresh=True, + ), + BoolInput( + name="tool_model_enabled", + display_name="Enable Tool Models", + info=( + "Select if you want to use models that can work with tools. If yes, only those models will be shown." 
+ ), + advanced=False, + value=True, + real_time_refresh=True, ), ] - def get_models(self) -> list[str]: - api_key = self.groq_api_key - base_url = self.groq_api_base or "https://api.groq.com" - url = f"{base_url}/openai/v1/models" - - headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"} - + def get_models(self, tool_model_enabled: bool | None = None) -> list[str]: try: + url = f"{self.base_url}/openai/v1/models" + headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"} + response = requests.get(url, headers=headers, timeout=10) response.raise_for_status() model_list = response.json() - return [model["id"] for model in model_list.get("data", [])] - except requests.RequestException as e: - self.status = f"Error fetching models: {e}" - return GROQ_MODELS + model_ids = [model["id"] for model in model_list.get("data", [])] + except (ImportError, ValueError, requests.exceptions.RequestException) as e: + logger.exception(f"Error getting model names: {e}") + model_ids = GROQ_MODELS + if tool_model_enabled: + try: + from langchain_groq import ChatGroq + except ImportError as e: + msg = "langchain_groq is not installed. Please install it with `pip install langchain_groq`." + raise ImportError(msg) from e + for model in model_ids: + model_with_tool = ChatGroq( + model=model, + api_key=self.api_key, + base_url=self.base_url, + ) + if not self.supports_tool_calling(model_with_tool): + model_ids.remove(model) + return model_ids + return model_ids - @override def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None): - if field_name in {"groq_api_key", "groq_api_base", "model_name"}: - models = self.get_models() - build_config["model_name"]["options"] = models + if field_name in ("base_url", "model_name", "tool_model_enabled", "api_key") and field_value: + try: + if len(self.api_key) != 0: + try: + ids = self.get_models(tool_model_enabled=self.tool_model_enabled) + except (ImportError, ValueError, requests.exceptions.RequestException) as e: + logger.exception(f"Error getting model names: {e}") + ids = GROQ_MODELS + build_config["model_name"]["options"] = ids + build_config["model_name"]["value"] = ids[0] + except Exception as e: + msg = f"Error getting model names: {e}" + raise ValueError(msg) from e return build_config def build_model(self) -> LanguageModel: # type: ignore[type-var] @@ -83,20 +126,12 @@ def build_model(self) -> LanguageModel: # type: ignore[type-var] msg = "langchain-groq is not installed. Please install it with `pip install langchain-groq`." 
raise ImportError(msg) from e - groq_api_key = self.groq_api_key - model_name = self.model_name - max_tokens = self.max_tokens - temperature = self.temperature - groq_api_base = self.groq_api_base - n = self.n - stream = self.stream - return ChatGroq( - model=model_name, - max_tokens=max_tokens or None, - temperature=temperature, - base_url=groq_api_base, - n=n or 1, - api_key=SecretStr(groq_api_key).get_secret_value(), - streaming=stream, + model=self.model_name, + max_tokens=self.max_tokens or None, + temperature=self.temperature, + base_url=self.base_url, + n=self.n or 1, + api_key=SecretStr(self.api_key).get_secret_value(), + streaming=self.stream, ) diff --git a/src/backend/base/langflow/components/models/nvidia.py b/src/backend/base/langflow/components/models/nvidia.py index 89132d3c6f2b..92b0b60436e9 100644 --- a/src/backend/base/langflow/components/models/nvidia.py +++ b/src/backend/base/langflow/components/models/nvidia.py @@ -2,7 +2,8 @@ from langflow.base.models.model import LCModelComponent from langflow.field_typing import LanguageModel -from langflow.inputs import BoolInput, DropdownInput, FloatInput, IntInput, SecretStrInput, StrInput +from langflow.field_typing.range_spec import RangeSpec +from langflow.inputs import BoolInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput, SliderInput from langflow.schema.dotdict import dotdict @@ -23,15 +24,17 @@ class NVIDIAModelComponent(LCModelComponent): name="model_name", display_name="Model Name", advanced=False, - options=["mistralai/mixtral-8x7b-instruct-v0.1"], - value="mistralai/mixtral-8x7b-instruct-v0.1", + options=[], + real_time_refresh=True, + refresh_button=True, ), - StrInput( + MessageTextInput( name="base_url", display_name="NVIDIA Base URL", value="https://integrate.api.nvidia.com/v1", refresh_button=True, info="The base URL of the NVIDIA API. Defaults to https://integrate.api.nvidia.com/v1.", + real_time_refresh=True, ), BoolInput( name="tool_model_enabled", @@ -41,15 +44,23 @@ class NVIDIAModelComponent(LCModelComponent): ), advanced=False, value=True, + real_time_refresh=True, ), SecretStrInput( - name="nvidia_api_key", + name="api_key", display_name="NVIDIA API Key", info="The NVIDIA API Key.", advanced=False, value="NVIDIA_API_KEY", + real_time_refresh=True, + ), + SliderInput( + name="temperature", + display_name="Temperature", + value=0.1, + info="Run inference with this temperature. Must by in the closed interval [0.0, 1.0].", + range_spec=RangeSpec(min=0, max=1, step=0.01), ), - FloatInput(name="temperature", display_name="Temperature", value=0.1), IntInput( name="seed", display_name="Seed", @@ -67,7 +78,7 @@ def get_models(self, tool_model_enabled: bool | None = None) -> list[str]: return [model.id for model in build_model.available_models] def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None): - if field_name == "base_url" and field_value: + if field_name in ("base_url", "model_name", "tool_model_enabled", "api_key") and field_value: try: ids = self.get_models(self.tool_model_enabled) build_config["model_name"]["options"] = ids @@ -83,7 +94,7 @@ def build_model(self) -> LanguageModel: # type: ignore[type-var] except ImportError as e: msg = "Please install langchain-nvidia-ai-endpoints to use the NVIDIA model." 
raise ImportError(msg) from e - nvidia_api_key = self.nvidia_api_key + api_key = self.api_key temperature = self.temperature model_name: str = self.model_name max_tokens = self.max_tokens @@ -92,7 +103,7 @@ def build_model(self) -> LanguageModel: # type: ignore[type-var] max_tokens=max_tokens or None, model=model_name, base_url=self.base_url, - api_key=nvidia_api_key, + api_key=api_key, temperature=temperature or 0.1, seed=seed, ) diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Custom Component Maker.json b/src/backend/base/langflow/initial_setup/starter_projects/Custom Component Maker.json index 7e3a277589b4..0171edfaf075 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Custom Component Maker.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Custom Component Maker.json @@ -1375,7 +1375,7 @@ "pinned": false, "template": { "_type": "Component", - "anthropic_api_key": { + "api_key": { "_input_type": "SecretStrInput", "advanced": false, "display_name": "Anthropic API Key", @@ -1384,19 +1384,19 @@ "input_types": [ "Message" ], - "load_from_db": false, - "name": "anthropic_api_key", + "load_from_db": true, + "name": "api_key", "password": true, "placeholder": "", + "real_time_refresh": true, "required": false, "show": true, "title_case": false, - "type": "str", - "value": "" + "type": "str" }, - "anthropic_api_url": { + "base_url": { "_input_type": "MessageTextInput", - "advanced": true, + "advanced": false, "display_name": "Anthropic API URL", "dynamic": false, "info": "Endpoint of the Anthropic API. Defaults to 'https://api.anthropic.com' if not specified.", @@ -1404,16 +1404,19 @@ "Message" ], "list": false, + "list_add_label": "Add More", "load_from_db": false, - "name": "anthropic_api_url", + "name": "base_url", "placeholder": "", + "real_time_refresh": true, "required": false, "show": true, "title_case": false, + "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, "type": "str", - "value": "" + "value": "https://api.anthropic.com" }, "code": { "advanced": true, @@ -1431,7 +1434,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from pydantic.v1 import SecretStr\n\nfrom langflow.base.models.anthropic_constants import ANTHROPIC_MODELS\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.field_typing import LanguageModel\nfrom langflow.io import DropdownInput, FloatInput, IntInput, MessageTextInput, SecretStrInput\n\n\nclass AnthropicModelComponent(LCModelComponent):\n display_name = \"Anthropic\"\n description = \"Generate text using Anthropic Chat&Completion LLMs with prefill support.\"\n icon = \"Anthropic\"\n name = \"AnthropicModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n value=4096,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=ANTHROPIC_MODELS,\n info=\"https://python.langchain.com/docs/integrations/chat/anthropic\",\n value=\"claude-3-5-sonnet-latest\",\n ),\n SecretStrInput(name=\"anthropic_api_key\", display_name=\"Anthropic API Key\", info=\"Your Anthropic API key.\"),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n MessageTextInput(\n name=\"anthropic_api_url\",\n display_name=\"Anthropic API URL\",\n advanced=True,\n info=\"Endpoint of the Anthropic API. 
Defaults to 'https://api.anthropic.com' if not specified.\",\n ),\n MessageTextInput(\n name=\"prefill\", display_name=\"Prefill\", info=\"Prefill text to guide the model's response.\", advanced=True\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n try:\n from langchain_anthropic.chat_models import ChatAnthropic\n except ImportError as e:\n msg = \"langchain_anthropic is not installed. Please install it with `pip install langchain_anthropic`.\"\n raise ImportError(msg) from e\n model = self.model\n anthropic_api_key = self.anthropic_api_key\n max_tokens = self.max_tokens\n temperature = self.temperature\n anthropic_api_url = self.anthropic_api_url or \"https://api.anthropic.com\"\n\n try:\n output = ChatAnthropic(\n model=model,\n anthropic_api_key=(SecretStr(anthropic_api_key).get_secret_value() if anthropic_api_key else None),\n max_tokens_to_sample=max_tokens,\n temperature=temperature,\n anthropic_api_url=anthropic_api_url,\n streaming=self.stream,\n )\n except Exception as e:\n msg = \"Could not connect to Anthropic API.\"\n raise ValueError(msg) from e\n\n return output\n\n def _get_exception_message(self, exception: Exception) -> str | None:\n \"\"\"Get a message from an Anthropic exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from anthropic import BadRequestError\n except ImportError:\n return None\n if isinstance(exception, BadRequestError):\n message = exception.body.get(\"error\", {}).get(\"message\")\n if message:\n return message\n return None\n" + "value": "from typing import Any\n\nimport requests\nfrom loguru import logger\n\nfrom langflow.base.models.anthropic_constants import ANTHROPIC_MODELS\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.io import BoolInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput, SliderInput\nfrom langflow.schema.dotdict import dotdict\n\n\nclass AnthropicModelComponent(LCModelComponent):\n display_name = \"Anthropic\"\n description = \"Generate text using Anthropic Chat&Completion LLMs with prefill support.\"\n icon = \"Anthropic\"\n name = \"AnthropicModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n value=4096,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=[],\n refresh_button=True,\n real_time_refresh=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Anthropic API Key\",\n info=\"Your Anthropic API key.\",\n value=None,\n real_time_refresh=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n info=\"Run inference with this temperature. Must by in the closed interval [0.0, 1.0].\",\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n ),\n MessageTextInput(\n name=\"base_url\",\n display_name=\"Anthropic API URL\",\n info=\"Endpoint of the Anthropic API. Defaults to 'https://api.anthropic.com' if not specified.\",\n value=\"https://api.anthropic.com\",\n real_time_refresh=True,\n ),\n BoolInput(\n name=\"tool_model_enabled\",\n display_name=\"Enable Tool Models\",\n info=(\n \"Select if you want to use models that can work with tools. 
If yes, only those models will be shown.\"\n ),\n advanced=False,\n value=True,\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"prefill\", display_name=\"Prefill\", info=\"Prefill text to guide the model's response.\", advanced=True\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n try:\n from langchain_anthropic.chat_models import ChatAnthropic\n except ImportError as e:\n msg = \"langchain_anthropic is not installed. Please install it with `pip install langchain_anthropic`.\"\n raise ImportError(msg) from e\n try:\n output = ChatAnthropic(\n model=self.model_name,\n anthropic_api_key=self.api_key,\n max_tokens_to_sample=self.max_tokens,\n temperature=self.temperature,\n anthropic_api_url=self.base_url,\n streaming=self.stream,\n )\n except Exception as e:\n msg = \"Could not connect to Anthropic API.\"\n raise ValueError(msg) from e\n\n return output\n\n def get_models(self, tool_model_enabled: bool | None = None) -> list[str]:\n try:\n import anthropic\n\n client = anthropic.Anthropic(api_key=self.api_key)\n models = client.models.list(limit=20).data\n model_ids = [model.id for model in models]\n except (ImportError, ValueError, requests.exceptions.RequestException) as e:\n logger.exception(f\"Error getting model names: {e}\")\n model_ids = ANTHROPIC_MODELS\n if tool_model_enabled:\n try:\n from langchain_anthropic.chat_models import ChatAnthropic\n except ImportError as e:\n msg = \"langchain_anthropic is not installed. Please install it with `pip install langchain_anthropic`.\"\n raise ImportError(msg) from e\n for model in model_ids:\n model_with_tool = ChatAnthropic(\n model=self.model_name,\n anthropic_api_key=self.api_key,\n anthropic_api_url=self.base_url,\n )\n if not self.supports_tool_calling(model_with_tool):\n model_ids.remove(model)\n return model_ids\n\n def _get_exception_message(self, exception: Exception) -> str | None:\n \"\"\"Get a message from an Anthropic exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from anthropic import BadRequestError\n except ImportError:\n return None\n if isinstance(exception, BadRequestError):\n message = exception.body.get(\"error\", {}).get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None):\n if field_name in (\"base_url\", \"model_name\", \"tool_model_enabled\", \"api_key\") and field_value:\n try:\n if len(self.api_key) != 0:\n try:\n ids = self.get_models(tool_model_enabled=self.tool_model_enabled)\n except (ImportError, ValueError, requests.exceptions.RequestException) as e:\n logger.exception(f\"Error getting model names: {e}\")\n ids = ANTHROPIC_MODELS\n build_config[\"model_name\"][\"options\"] = ids\n build_config[\"model_name\"][\"value\"] = ids[0]\n except Exception as e:\n msg = f\"Error getting model names: {e}\"\n raise ValueError(msg) from e\n return build_config\n" }, "input_value": { "_input_type": "MessageInput", @@ -1470,31 +1473,25 @@ "type": "int", "value": 4096 }, - "model": { + "model_name": { "_input_type": "DropdownInput", "advanced": false, "combobox": false, "display_name": "Model Name", "dynamic": false, - "info": "https://python.langchain.com/docs/integrations/chat/anthropic", - "name": "model", - "options": [ - "claude-3-5-sonnet-latest", - "claude-3-5-haiku-latest", - "claude-3-opus-latest", - "claude-3-5-sonnet-20240620", - 
"claude-3-5-sonnet-20241022", - "claude-3-5-haiku-20241022", - "claude-3-sonnet-20240229", - "claude-3-haiku-20240307" - ], + "info": "", + "name": "model_name", + "options": [], "placeholder": "", + "real_time_refresh": true, + "refresh_button": true, "required": false, "show": true, "title_case": false, + "tool_mode": false, "trace_as_metadata": true, "type": "str", - "value": "claude-3-5-sonnet-20240620" + "value": "" }, "prefill": { "_input_type": "MessageTextInput", @@ -1559,7 +1556,7 @@ "advanced": false, "display_name": "Temperature", "dynamic": false, - "info": "", + "info": "Run inference with this temperature. Must by in the closed interval [0.0, 1.0].", "list": false, "name": "temperature", "placeholder": "", @@ -1569,6 +1566,25 @@ "trace_as_metadata": true, "type": "float", "value": 0.1 + }, + "tool_model_enabled": { + "_input_type": "BoolInput", + "advanced": false, + "display_name": "Enable Tool Models", + "dynamic": false, + "info": "Select if you want to use models that can work with tools. If yes, only those models will be shown.", + "list": false, + "list_add_label": "Add More", + "name": "tool_model_enabled", + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "bool", + "value": true } } }, diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Instagram Copywriter.json b/src/backend/base/langflow/initial_setup/starter_projects/Instagram Copywriter.json index 9fb42cde6026..ef39a5a99aae 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Instagram Copywriter.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Instagram Copywriter.json @@ -1735,7 +1735,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_PROVIDERS_DICT,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import (\n ToolCallingAgentComponent,\n)\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context 
provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n except Exception as e:\n # Log the error for debugging purposes\n logger.error(f\"Error retrieving language model: {e}\")\n raise\n\n try:\n self.chat_history = await self.get_memory_data()\n except Exception as e:\n logger.error(f\"Error retrieving chat history: {e}\")\n raise\n\n if self.add_current_date_tool:\n try:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n # Convert CurrentDateComponent to a StructuredTool\n current_date_tool = CurrentDateComponent().to_toolkit()[0]\n if isinstance(current_date_tool, StructuredTool):\n self.tools.append(current_date_tool)\n else:\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n except Exception as e:\n logger.error(f\"Error adding current date tool: {e}\")\n raise\n\n if not self.tools:\n msg = \"Tools are required to run the agent.\"\n logger.error(msg)\n raise ValueError(msg)\n\n try:\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n except Exception as e:\n logger.error(f\"Error setting up the agent: {e}\")\n raise\n\n return await self.run_agent(agent)\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n\n return await MemoryComponent().set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if isinstance(self.agent_llm, str):\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n return (\n self._build_llm_model(component_class, inputs, prefix),\n display_name,\n )\n except Exception as e:\n msg = f\"Error building {self.agent_llm} language model\"\n raise ValueError(msg) from e\n return self.agent_llm, None\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n return component.set(**model_kwargs).build_model()\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif 
hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name == \"agent_llm\":\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if isinstance(self.agent_llm, str) and self.agent_llm in MODEL_PROVIDERS_DICT:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n return build_config\n" + "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n)\nfrom 
langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import (\n ToolCallingAgentComponent,\n)\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n except Exception as e:\n # Log the error for debugging purposes\n logger.error(f\"Error retrieving language model: {e}\")\n raise\n\n try:\n self.chat_history = await self.get_memory_data()\n except Exception as e:\n logger.error(f\"Error retrieving chat history: {e}\")\n raise\n\n if self.add_current_date_tool:\n try:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n # Convert CurrentDateComponent to a StructuredTool\n current_date_tool = CurrentDateComponent().to_toolkit()[0]\n if isinstance(current_date_tool, StructuredTool):\n self.tools.append(current_date_tool)\n else:\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n except Exception as e:\n logger.error(f\"Error adding current date tool: {e}\")\n raise\n\n if not self.tools:\n msg = \"Tools are required to run the agent.\"\n logger.error(msg)\n raise ValueError(msg)\n\n try:\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n except Exception as e:\n logger.error(f\"Error setting up the agent: {e}\")\n raise\n\n return await self.run_agent(agent)\n\n async def 
get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n\n return await MemoryComponent().set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if isinstance(self.agent_llm, str):\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n return (\n self._build_llm_model(component_class, inputs, prefix),\n display_name,\n )\n except Exception as e:\n msg = f\"Error building {self.agent_llm} language model\"\n raise ValueError(msg) from e\n return self.agent_llm, None\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n 
self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n return build_config\n" }, "handle_parsing_errors": { "_input_type": "BoolInput", diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Market Research.json b/src/backend/base/langflow/initial_setup/starter_projects/Market Research.json index b4df2d8b111c..4fd9ee2e2aaf 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Market Research.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Market Research.json @@ -1979,7 +1979,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_PROVIDERS_DICT,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import (\n ToolCallingAgentComponent,\n)\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n 
display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n except Exception as e:\n # Log the error for debugging purposes\n logger.error(f\"Error retrieving language model: {e}\")\n raise\n\n try:\n self.chat_history = await self.get_memory_data()\n except Exception as e:\n logger.error(f\"Error retrieving chat history: {e}\")\n raise\n\n if self.add_current_date_tool:\n try:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n # Convert CurrentDateComponent to a StructuredTool\n current_date_tool = CurrentDateComponent().to_toolkit()[0]\n if isinstance(current_date_tool, StructuredTool):\n self.tools.append(current_date_tool)\n else:\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n except Exception as e:\n logger.error(f\"Error adding current date tool: {e}\")\n raise\n\n if not self.tools:\n msg = \"Tools are required to run the agent.\"\n logger.error(msg)\n raise ValueError(msg)\n\n try:\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n except Exception as e:\n logger.error(f\"Error setting up the agent: {e}\")\n raise\n\n return await self.run_agent(agent)\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n\n return await MemoryComponent().set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if isinstance(self.agent_llm, str):\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n return (\n self._build_llm_model(component_class, inputs, prefix),\n display_name,\n )\n except Exception as e:\n msg = f\"Error building {self.agent_llm} language model\"\n raise ValueError(msg) from e\n return self.agent_llm, None\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n return component.set(**model_kwargs).build_model()\n\n def delete_fields(self, build_config: dotdict, 
fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name == \"agent_llm\":\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if isinstance(self.agent_llm, str) and self.agent_llm in MODEL_PROVIDERS_DICT:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = 
field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n return build_config\n" + "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import (\n ToolCallingAgentComponent,\n)\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n except Exception as e:\n # Log the error for debugging purposes\n logger.error(f\"Error retrieving language model: {e}\")\n raise\n\n try:\n self.chat_history = await self.get_memory_data()\n except Exception as e:\n logger.error(f\"Error retrieving chat history: {e}\")\n raise\n\n if self.add_current_date_tool:\n try:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n # Convert CurrentDateComponent to a StructuredTool\n current_date_tool = CurrentDateComponent().to_toolkit()[0]\n if isinstance(current_date_tool, StructuredTool):\n self.tools.append(current_date_tool)\n else:\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n except Exception as e:\n logger.error(f\"Error adding current date tool: {e}\")\n raise\n\n if not self.tools:\n msg = 
\"Tools are required to run the agent.\"\n logger.error(msg)\n raise ValueError(msg)\n\n try:\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n except Exception as e:\n logger.error(f\"Error setting up the agent: {e}\")\n raise\n\n return await self.run_agent(agent)\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n\n return await MemoryComponent().set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if isinstance(self.agent_llm, str):\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n return (\n self._build_llm_model(component_class, inputs, prefix),\n display_name,\n )\n except Exception as e:\n msg = f\"Error building {self.agent_llm} language model\"\n raise ValueError(msg) from e\n return self.agent_llm, None\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n 
self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n return build_config\n" }, "handle_parsing_errors": { "_input_type": "BoolInput", diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Research Agent.json b/src/backend/base/langflow/initial_setup/starter_projects/Research Agent.json index 35466095aa77..a7fa3864e1c0 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Research Agent.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Research Agent.json @@ -2226,7 +2226,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_PROVIDERS_DICT,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import (\n ToolCallingAgentComponent,\n)\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return 
component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n except Exception as e:\n # Log the error for debugging purposes\n logger.error(f\"Error retrieving language model: {e}\")\n raise\n\n try:\n self.chat_history = await self.get_memory_data()\n except Exception as e:\n logger.error(f\"Error retrieving chat history: {e}\")\n raise\n\n if self.add_current_date_tool:\n try:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n # Convert CurrentDateComponent to a StructuredTool\n current_date_tool = CurrentDateComponent().to_toolkit()[0]\n if isinstance(current_date_tool, StructuredTool):\n self.tools.append(current_date_tool)\n else:\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n except Exception as e:\n logger.error(f\"Error adding current date tool: {e}\")\n raise\n\n if not self.tools:\n msg = \"Tools are required to run the agent.\"\n logger.error(msg)\n raise ValueError(msg)\n\n try:\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n except Exception as e:\n logger.error(f\"Error setting up the agent: {e}\")\n raise\n\n return await self.run_agent(agent)\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n\n return await MemoryComponent().set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if isinstance(self.agent_llm, str):\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n return (\n self._build_llm_model(component_class, inputs, prefix),\n 
display_name,\n )\n except Exception as e:\n msg = f\"Error building {self.agent_llm} language model\"\n raise ValueError(msg) from e\n return self.agent_llm, None\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n return component.set(**model_kwargs).build_model()\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name == \"agent_llm\":\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if isinstance(self.agent_llm, str) and self.agent_llm in MODEL_PROVIDERS_DICT:\n 
provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n return build_config\n" + "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import (\n ToolCallingAgentComponent,\n)\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n except Exception as e:\n # Log the error for debugging purposes\n logger.error(f\"Error retrieving language model: {e}\")\n raise\n\n try:\n self.chat_history = await self.get_memory_data()\n except Exception as e:\n logger.error(f\"Error retrieving chat history: {e}\")\n raise\n\n if self.add_current_date_tool:\n try:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = 
[]\n # Convert CurrentDateComponent to a StructuredTool\n current_date_tool = CurrentDateComponent().to_toolkit()[0]\n if isinstance(current_date_tool, StructuredTool):\n self.tools.append(current_date_tool)\n else:\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n except Exception as e:\n logger.error(f\"Error adding current date tool: {e}\")\n raise\n\n if not self.tools:\n msg = \"Tools are required to run the agent.\"\n logger.error(msg)\n raise ValueError(msg)\n\n try:\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n except Exception as e:\n logger.error(f\"Error setting up the agent: {e}\")\n raise\n\n return await self.run_agent(agent)\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n\n return await MemoryComponent().set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if isinstance(self.agent_llm, str):\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n return (\n self._build_llm_model(component_class, inputs, prefix),\n display_name,\n )\n except Exception as e:\n msg = f\"Error building {self.agent_llm} language model\"\n raise ValueError(msg) from e\n return self.agent_llm, None\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] 
= {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n return build_config\n" }, "handle_parsing_errors": { "_input_type": "BoolInput", diff --git a/src/backend/base/langflow/initial_setup/starter_projects/SaaS Pricing.json b/src/backend/base/langflow/initial_setup/starter_projects/SaaS Pricing.json index 7f56612f3f9c..daa608ebf77e 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/SaaS Pricing.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/SaaS Pricing.json @@ -802,7 +802,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_PROVIDERS_DICT,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom 
langflow.components.langchain_utilities.tool_calling import (\n ToolCallingAgentComponent,\n)\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n except Exception as e:\n # Log the error for debugging purposes\n logger.error(f\"Error retrieving language model: {e}\")\n raise\n\n try:\n self.chat_history = await self.get_memory_data()\n except Exception as e:\n logger.error(f\"Error retrieving chat history: {e}\")\n raise\n\n if self.add_current_date_tool:\n try:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n # Convert CurrentDateComponent to a StructuredTool\n current_date_tool = CurrentDateComponent().to_toolkit()[0]\n if isinstance(current_date_tool, StructuredTool):\n self.tools.append(current_date_tool)\n else:\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n except Exception as e:\n logger.error(f\"Error adding current date tool: {e}\")\n raise\n\n if not self.tools:\n msg = \"Tools are required to run the agent.\"\n logger.error(msg)\n raise ValueError(msg)\n\n try:\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n except Exception as e:\n logger.error(f\"Error setting up the agent: {e}\")\n raise\n\n return await self.run_agent(agent)\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n\n return await 
MemoryComponent().set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if isinstance(self.agent_llm, str):\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n return (\n self._build_llm_model(component_class, inputs, prefix),\n display_name,\n )\n except Exception as e:\n msg = f\"Error building {self.agent_llm} language model\"\n raise ValueError(msg) from e\n return self.agent_llm, None\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n return component.set(**model_kwargs).build_model()\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name == \"agent_llm\":\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n 
\"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if isinstance(self.agent_llm, str) and self.agent_llm in MODEL_PROVIDERS_DICT:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n return build_config\n" + "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import (\n ToolCallingAgentComponent,\n)\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected\"\n raise ValueError(msg)\n 
self.model_name = get_model_name(llm_model, display_name=display_name)\n except Exception as e:\n # Log the error for debugging purposes\n logger.error(f\"Error retrieving language model: {e}\")\n raise\n\n try:\n self.chat_history = await self.get_memory_data()\n except Exception as e:\n logger.error(f\"Error retrieving chat history: {e}\")\n raise\n\n if self.add_current_date_tool:\n try:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n # Convert CurrentDateComponent to a StructuredTool\n current_date_tool = CurrentDateComponent().to_toolkit()[0]\n if isinstance(current_date_tool, StructuredTool):\n self.tools.append(current_date_tool)\n else:\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n except Exception as e:\n logger.error(f\"Error adding current date tool: {e}\")\n raise\n\n if not self.tools:\n msg = \"Tools are required to run the agent.\"\n logger.error(msg)\n raise ValueError(msg)\n\n try:\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n except Exception as e:\n logger.error(f\"Error setting up the agent: {e}\")\n raise\n\n return await self.run_agent(agent)\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n\n return await MemoryComponent().set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if isinstance(self.agent_llm, str):\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n return (\n self._build_llm_model(component_class, inputs, prefix),\n display_name,\n )\n except Exception as e:\n msg = f\"Error building {self.agent_llm} language model\"\n raise ValueError(msg) from e\n return self.agent_llm, None\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if 
field_name in (\"agent_llm\",):\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n return build_config\n" }, "handle_parsing_errors": { "_input_type": "BoolInput", diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Sequential Tasks Agents .json b/src/backend/base/langflow/initial_setup/starter_projects/Sequential Tasks Agents .json index 41b5cc28ec41..9c488c4105e5 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Sequential Tasks Agents .json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Sequential Tasks Agents .json @@ -747,7 +747,7 @@ 
"show": true, "title_case": false, "type": "code", - "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_PROVIDERS_DICT,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import (\n ToolCallingAgentComponent,\n)\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n except Exception as e:\n # Log the error for debugging purposes\n logger.error(f\"Error retrieving language model: {e}\")\n raise\n\n try:\n self.chat_history = await self.get_memory_data()\n except Exception as e:\n logger.error(f\"Error retrieving chat history: {e}\")\n raise\n\n if self.add_current_date_tool:\n try:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n # Convert CurrentDateComponent to a StructuredTool\n current_date_tool = CurrentDateComponent().to_toolkit()[0]\n if isinstance(current_date_tool, StructuredTool):\n self.tools.append(current_date_tool)\n else:\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n except Exception as e:\n logger.error(f\"Error adding current date tool: {e}\")\n raise\n\n if not self.tools:\n msg = \"Tools are required to run the agent.\"\n logger.error(msg)\n raise ValueError(msg)\n\n try:\n self.set(\n llm=llm_model,\n tools=self.tools,\n 
chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n except Exception as e:\n logger.error(f\"Error setting up the agent: {e}\")\n raise\n\n return await self.run_agent(agent)\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n\n return await MemoryComponent().set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if isinstance(self.agent_llm, str):\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n return (\n self._build_llm_model(component_class, inputs, prefix),\n display_name,\n )\n except Exception as e:\n msg = f\"Error building {self.agent_llm} language model\"\n raise ValueError(msg) from e\n return self.agent_llm, None\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n return component.set(**model_kwargs).build_model()\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name == \"agent_llm\":\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component 
= DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if isinstance(self.agent_llm, str) and self.agent_llm in MODEL_PROVIDERS_DICT:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n return build_config\n" + "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import (\n ToolCallingAgentComponent,\n)\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n 
name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n except Exception as e:\n # Log the error for debugging purposes\n logger.error(f\"Error retrieving language model: {e}\")\n raise\n\n try:\n self.chat_history = await self.get_memory_data()\n except Exception as e:\n logger.error(f\"Error retrieving chat history: {e}\")\n raise\n\n if self.add_current_date_tool:\n try:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n # Convert CurrentDateComponent to a StructuredTool\n current_date_tool = CurrentDateComponent().to_toolkit()[0]\n if isinstance(current_date_tool, StructuredTool):\n self.tools.append(current_date_tool)\n else:\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n except Exception as e:\n logger.error(f\"Error adding current date tool: {e}\")\n raise\n\n if not self.tools:\n msg = \"Tools are required to run the agent.\"\n logger.error(msg)\n raise ValueError(msg)\n\n try:\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n except Exception as e:\n logger.error(f\"Error setting up the agent: {e}\")\n raise\n\n return await self.run_agent(agent)\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n\n return await MemoryComponent().set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if isinstance(self.agent_llm, str):\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n return (\n self._build_llm_model(component_class, inputs, prefix),\n display_name,\n )\n except Exception as e:\n msg = f\"Error building {self.agent_llm} language model\"\n raise ValueError(msg) from e\n return self.agent_llm, None\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in 
build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n return build_config\n" }, 
"handle_parsing_errors": { "_input_type": "BoolInput", @@ -1323,7 +1323,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_PROVIDERS_DICT,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import (\n ToolCallingAgentComponent,\n)\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n except Exception as e:\n # Log the error for debugging purposes\n logger.error(f\"Error retrieving language model: {e}\")\n raise\n\n try:\n self.chat_history = await self.get_memory_data()\n except Exception as e:\n logger.error(f\"Error retrieving chat history: {e}\")\n raise\n\n if self.add_current_date_tool:\n try:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n # Convert CurrentDateComponent to a StructuredTool\n current_date_tool = CurrentDateComponent().to_toolkit()[0]\n if isinstance(current_date_tool, StructuredTool):\n self.tools.append(current_date_tool)\n else:\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n except Exception as e:\n logger.error(f\"Error adding current date tool: {e}\")\n raise\n\n if not self.tools:\n msg = \"Tools are required to run the agent.\"\n logger.error(msg)\n raise 
ValueError(msg)\n\n try:\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n except Exception as e:\n logger.error(f\"Error setting up the agent: {e}\")\n raise\n\n return await self.run_agent(agent)\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n\n return await MemoryComponent().set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if isinstance(self.agent_llm, str):\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n return (\n self._build_llm_model(component_class, inputs, prefix),\n display_name,\n )\n except Exception as e:\n msg = f\"Error building {self.agent_llm} language model\"\n raise ValueError(msg) from e\n return self.agent_llm, None\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n return component.set(**model_kwargs).build_model()\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name == \"agent_llm\":\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n 
self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if isinstance(self.agent_llm, str) and self.agent_llm in MODEL_PROVIDERS_DICT:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n return build_config\n" + "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import (\n ToolCallingAgentComponent,\n)\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n 
),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n except Exception as e:\n # Log the error for debugging purposes\n logger.error(f\"Error retrieving language model: {e}\")\n raise\n\n try:\n self.chat_history = await self.get_memory_data()\n except Exception as e:\n logger.error(f\"Error retrieving chat history: {e}\")\n raise\n\n if self.add_current_date_tool:\n try:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n # Convert CurrentDateComponent to a StructuredTool\n current_date_tool = CurrentDateComponent().to_toolkit()[0]\n if isinstance(current_date_tool, StructuredTool):\n self.tools.append(current_date_tool)\n else:\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n except Exception as e:\n logger.error(f\"Error adding current date tool: {e}\")\n raise\n\n if not self.tools:\n msg = \"Tools are required to run the agent.\"\n logger.error(msg)\n raise ValueError(msg)\n\n try:\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n except Exception as e:\n logger.error(f\"Error setting up the agent: {e}\")\n raise\n\n return await self.run_agent(agent)\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n\n return await MemoryComponent().set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if isinstance(self.agent_llm, str):\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n return (\n self._build_llm_model(component_class, inputs, prefix),\n display_name,\n )\n except Exception as e:\n msg = f\"Error building {self.agent_llm} language model\"\n raise ValueError(msg) from e\n return self.agent_llm, None\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for 
all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, 
field_value, field_name\n )\n return build_config\n" }, "handle_parsing_errors": { "_input_type": "BoolInput", @@ -3212,7 +3212,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_PROVIDERS_DICT,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import (\n ToolCallingAgentComponent,\n)\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n except Exception as e:\n # Log the error for debugging purposes\n logger.error(f\"Error retrieving language model: {e}\")\n raise\n\n try:\n self.chat_history = await self.get_memory_data()\n except Exception as e:\n logger.error(f\"Error retrieving chat history: {e}\")\n raise\n\n if self.add_current_date_tool:\n try:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n # Convert CurrentDateComponent to a StructuredTool\n current_date_tool = CurrentDateComponent().to_toolkit()[0]\n if isinstance(current_date_tool, StructuredTool):\n self.tools.append(current_date_tool)\n else:\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n except Exception as e:\n logger.error(f\"Error adding current date tool: {e}\")\n raise\n\n if not self.tools:\n msg = \"Tools are required to 
run the agent.\"\n logger.error(msg)\n raise ValueError(msg)\n\n try:\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n except Exception as e:\n logger.error(f\"Error setting up the agent: {e}\")\n raise\n\n return await self.run_agent(agent)\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n\n return await MemoryComponent().set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if isinstance(self.agent_llm, str):\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n return (\n self._build_llm_model(component_class, inputs, prefix),\n display_name,\n )\n except Exception as e:\n msg = f\"Error building {self.agent_llm} language model\"\n raise ValueError(msg) from e\n return self.agent_llm, None\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n return component.set(**model_kwargs).build_model()\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name == \"agent_llm\":\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all 
provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if isinstance(self.agent_llm, str) and self.agent_llm in MODEL_PROVIDERS_DICT:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n return build_config\n" + "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import (\n ToolCallingAgentComponent,\n)\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n 
advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n except Exception as e:\n # Log the error for debugging purposes\n logger.error(f\"Error retrieving language model: {e}\")\n raise\n\n try:\n self.chat_history = await self.get_memory_data()\n except Exception as e:\n logger.error(f\"Error retrieving chat history: {e}\")\n raise\n\n if self.add_current_date_tool:\n try:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n # Convert CurrentDateComponent to a StructuredTool\n current_date_tool = CurrentDateComponent().to_toolkit()[0]\n if isinstance(current_date_tool, StructuredTool):\n self.tools.append(current_date_tool)\n else:\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n except Exception as e:\n logger.error(f\"Error adding current date tool: {e}\")\n raise\n\n if not self.tools:\n msg = \"Tools are required to run the agent.\"\n logger.error(msg)\n raise ValueError(msg)\n\n try:\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n except Exception as e:\n logger.error(f\"Error setting up the agent: {e}\")\n raise\n\n return await self.run_agent(agent)\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n\n return await MemoryComponent().set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if isinstance(self.agent_llm, str):\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n return (\n self._build_llm_model(component_class, inputs, prefix),\n display_name,\n )\n except Exception as e:\n msg = f\"Error building {self.agent_llm} language model\"\n raise ValueError(msg) from e\n return self.agent_llm, None\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n 
\"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n 
component_class, build_config, field_value, field_name\n )\n return build_config\n" }, "handle_parsing_errors": { "_input_type": "BoolInput", diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Simple Agent.json b/src/backend/base/langflow/initial_setup/starter_projects/Simple Agent.json index 596ad8c38410..6cff25a012ef 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Simple Agent.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Simple Agent.json @@ -275,7 +275,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_PROVIDERS_DICT,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import (\n ToolCallingAgentComponent,\n)\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n except Exception as e:\n # Log the error for debugging purposes\n logger.error(f\"Error retrieving language model: {e}\")\n raise\n\n try:\n self.chat_history = await self.get_memory_data()\n except Exception as e:\n logger.error(f\"Error retrieving chat history: {e}\")\n raise\n\n if self.add_current_date_tool:\n try:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n # Convert CurrentDateComponent to a StructuredTool\n 
current_date_tool = CurrentDateComponent().to_toolkit()[0]\n if isinstance(current_date_tool, StructuredTool):\n self.tools.append(current_date_tool)\n else:\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n except Exception as e:\n logger.error(f\"Error adding current date tool: {e}\")\n raise\n\n if not self.tools:\n msg = \"Tools are required to run the agent.\"\n logger.error(msg)\n raise ValueError(msg)\n\n try:\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n except Exception as e:\n logger.error(f\"Error setting up the agent: {e}\")\n raise\n\n return await self.run_agent(agent)\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n\n return await MemoryComponent().set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if isinstance(self.agent_llm, str):\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n return (\n self._build_llm_model(component_class, inputs, prefix),\n display_name,\n )\n except Exception as e:\n msg = f\"Error building {self.agent_llm} language model\"\n raise ValueError(msg) from e\n return self.agent_llm, None\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n return component.set(**model_kwargs).build_model()\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name == \"agent_llm\":\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in 
fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if isinstance(self.agent_llm, str) and self.agent_llm in MODEL_PROVIDERS_DICT:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n return build_config\n" + "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import (\n ToolCallingAgentComponent,\n)\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), 
\"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n except Exception as e:\n # Log the error for debugging purposes\n logger.error(f\"Error retrieving language model: {e}\")\n raise\n\n try:\n self.chat_history = await self.get_memory_data()\n except Exception as e:\n logger.error(f\"Error retrieving chat history: {e}\")\n raise\n\n if self.add_current_date_tool:\n try:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n # Convert CurrentDateComponent to a StructuredTool\n current_date_tool = CurrentDateComponent().to_toolkit()[0]\n if isinstance(current_date_tool, StructuredTool):\n self.tools.append(current_date_tool)\n else:\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n except Exception as e:\n logger.error(f\"Error adding current date tool: {e}\")\n raise\n\n if not self.tools:\n msg = \"Tools are required to run the agent.\"\n logger.error(msg)\n raise ValueError(msg)\n\n try:\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n except Exception as e:\n logger.error(f\"Error setting up the agent: {e}\")\n raise\n\n return await self.run_agent(agent)\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n\n return await MemoryComponent().set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if isinstance(self.agent_llm, str):\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n return (\n self._build_llm_model(component_class, inputs, prefix),\n display_name,\n )\n except Exception as e:\n msg = f\"Error building {self.agent_llm} language model\"\n raise ValueError(msg) from e\n return self.agent_llm, None\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = 
{input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = 
self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n return build_config\n" }, "handle_parsing_errors": { "_input_type": "BoolInput", diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Travel Planning Agents.json b/src/backend/base/langflow/initial_setup/starter_projects/Travel Planning Agents.json index 428d50439285..a1119bdf188d 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Travel Planning Agents.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Travel Planning Agents.json @@ -1374,7 +1374,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_PROVIDERS_DICT,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import (\n ToolCallingAgentComponent,\n)\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, 
display_name=display_name)\n except Exception as e:\n # Log the error for debugging purposes\n logger.error(f\"Error retrieving language model: {e}\")\n raise\n\n try:\n self.chat_history = await self.get_memory_data()\n except Exception as e:\n logger.error(f\"Error retrieving chat history: {e}\")\n raise\n\n if self.add_current_date_tool:\n try:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n # Convert CurrentDateComponent to a StructuredTool\n current_date_tool = CurrentDateComponent().to_toolkit()[0]\n if isinstance(current_date_tool, StructuredTool):\n self.tools.append(current_date_tool)\n else:\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n except Exception as e:\n logger.error(f\"Error adding current date tool: {e}\")\n raise\n\n if not self.tools:\n msg = \"Tools are required to run the agent.\"\n logger.error(msg)\n raise ValueError(msg)\n\n try:\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n except Exception as e:\n logger.error(f\"Error setting up the agent: {e}\")\n raise\n\n return await self.run_agent(agent)\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n\n return await MemoryComponent().set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if isinstance(self.agent_llm, str):\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n return (\n self._build_llm_model(component_class, inputs, prefix),\n display_name,\n )\n except Exception as e:\n msg = f\"Error building {self.agent_llm} language model\"\n raise ValueError(msg) from e\n return self.agent_llm, None\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n return component.set(**model_kwargs).build_model()\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name == \"agent_llm\":\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, 
field_name\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if isinstance(self.agent_llm, str) and self.agent_llm in MODEL_PROVIDERS_DICT:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n return build_config\n" + "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import (\n ToolCallingAgentComponent,\n)\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n 
description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n except Exception as e:\n # Log the error for debugging purposes\n logger.error(f\"Error retrieving language model: {e}\")\n raise\n\n try:\n self.chat_history = await self.get_memory_data()\n except Exception as e:\n logger.error(f\"Error retrieving chat history: {e}\")\n raise\n\n if self.add_current_date_tool:\n try:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n # Convert CurrentDateComponent to a StructuredTool\n current_date_tool = CurrentDateComponent().to_toolkit()[0]\n if isinstance(current_date_tool, StructuredTool):\n self.tools.append(current_date_tool)\n else:\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n except Exception as e:\n logger.error(f\"Error adding current date tool: {e}\")\n raise\n\n if not self.tools:\n msg = \"Tools are required to run the agent.\"\n logger.error(msg)\n raise ValueError(msg)\n\n try:\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n except Exception as e:\n logger.error(f\"Error setting up the agent: {e}\")\n raise\n\n return await self.run_agent(agent)\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n\n return await MemoryComponent().set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if isinstance(self.agent_llm, str):\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n return (\n self._build_llm_model(component_class, inputs, prefix),\n display_name,\n )\n except Exception as e:\n msg = f\"Error building {self.agent_llm} language model\"\n raise 
ValueError(msg) from e\n return self.agent_llm, None\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key 
for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n return build_config\n" }, "handle_parsing_errors": { "_input_type": "BoolInput", @@ -1950,7 +1950,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_PROVIDERS_DICT,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import (\n ToolCallingAgentComponent,\n)\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected\"\n raise ValueError(msg)\n 
self.model_name = get_model_name(llm_model, display_name=display_name)\n except Exception as e:\n # Log the error for debugging purposes\n logger.error(f\"Error retrieving language model: {e}\")\n raise\n\n try:\n self.chat_history = await self.get_memory_data()\n except Exception as e:\n logger.error(f\"Error retrieving chat history: {e}\")\n raise\n\n if self.add_current_date_tool:\n try:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n # Convert CurrentDateComponent to a StructuredTool\n current_date_tool = CurrentDateComponent().to_toolkit()[0]\n if isinstance(current_date_tool, StructuredTool):\n self.tools.append(current_date_tool)\n else:\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n except Exception as e:\n logger.error(f\"Error adding current date tool: {e}\")\n raise\n\n if not self.tools:\n msg = \"Tools are required to run the agent.\"\n logger.error(msg)\n raise ValueError(msg)\n\n try:\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n except Exception as e:\n logger.error(f\"Error setting up the agent: {e}\")\n raise\n\n return await self.run_agent(agent)\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n\n return await MemoryComponent().set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if isinstance(self.agent_llm, str):\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n return (\n self._build_llm_model(component_class, inputs, prefix),\n display_name,\n )\n except Exception as e:\n msg = f\"Error building {self.agent_llm} language model\"\n raise ValueError(msg) from e\n return self.agent_llm, None\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n return component.set(**model_kwargs).build_model()\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name == \"agent_llm\":\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n 
component_class, build_config, field_value, field_name\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if isinstance(self.agent_llm, str) and self.agent_llm in MODEL_PROVIDERS_DICT:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n return build_config\n" + "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import (\n ToolCallingAgentComponent,\n)\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n 
display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n except Exception as e:\n # Log the error for debugging purposes\n logger.error(f\"Error retrieving language model: {e}\")\n raise\n\n try:\n self.chat_history = await self.get_memory_data()\n except Exception as e:\n logger.error(f\"Error retrieving chat history: {e}\")\n raise\n\n if self.add_current_date_tool:\n try:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n # Convert CurrentDateComponent to a StructuredTool\n current_date_tool = CurrentDateComponent().to_toolkit()[0]\n if isinstance(current_date_tool, StructuredTool):\n self.tools.append(current_date_tool)\n else:\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n except Exception as e:\n logger.error(f\"Error adding current date tool: {e}\")\n raise\n\n if not self.tools:\n msg = \"Tools are required to run the agent.\"\n logger.error(msg)\n raise ValueError(msg)\n\n try:\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n except Exception as e:\n logger.error(f\"Error setting up the agent: {e}\")\n raise\n\n return await self.run_agent(agent)\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n\n return await MemoryComponent().set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if isinstance(self.agent_llm, str):\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n return (\n self._build_llm_model(component_class, inputs, prefix),\n display_name,\n )\n except Exception as e:\n msg = f\"Error building 
{self.agent_llm} language model\"\n raise ValueError(msg) from e\n return self.agent_llm, None\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n 
\"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n return build_config\n" }, "handle_parsing_errors": { "_input_type": "BoolInput", @@ -2526,7 +2526,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_PROVIDERS_DICT,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import (\n ToolCallingAgentComponent,\n)\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = 
\"No language model selected\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n except Exception as e:\n # Log the error for debugging purposes\n logger.error(f\"Error retrieving language model: {e}\")\n raise\n\n try:\n self.chat_history = await self.get_memory_data()\n except Exception as e:\n logger.error(f\"Error retrieving chat history: {e}\")\n raise\n\n if self.add_current_date_tool:\n try:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n # Convert CurrentDateComponent to a StructuredTool\n current_date_tool = CurrentDateComponent().to_toolkit()[0]\n if isinstance(current_date_tool, StructuredTool):\n self.tools.append(current_date_tool)\n else:\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n except Exception as e:\n logger.error(f\"Error adding current date tool: {e}\")\n raise\n\n if not self.tools:\n msg = \"Tools are required to run the agent.\"\n logger.error(msg)\n raise ValueError(msg)\n\n try:\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n except Exception as e:\n logger.error(f\"Error setting up the agent: {e}\")\n raise\n\n return await self.run_agent(agent)\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n\n return await MemoryComponent().set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if isinstance(self.agent_llm, str):\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n return (\n self._build_llm_model(component_class, inputs, prefix),\n display_name,\n )\n except Exception as e:\n msg = f\"Error building {self.agent_llm} language model\"\n raise ValueError(msg) from e\n return self.agent_llm, None\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n return component.set(**model_kwargs).build_model()\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name == \"agent_llm\":\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if isinstance(self.agent_llm, str) and self.agent_llm in MODEL_PROVIDERS_DICT:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n return build_config\n" + "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import (\n ToolCallingAgentComponent,\n)\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return 
component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n except Exception as e:\n # Log the error for debugging purposes\n logger.error(f\"Error retrieving language model: {e}\")\n raise\n\n try:\n self.chat_history = await self.get_memory_data()\n except Exception as e:\n logger.error(f\"Error retrieving chat history: {e}\")\n raise\n\n if self.add_current_date_tool:\n try:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n # Convert CurrentDateComponent to a StructuredTool\n current_date_tool = CurrentDateComponent().to_toolkit()[0]\n if isinstance(current_date_tool, StructuredTool):\n self.tools.append(current_date_tool)\n else:\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n except Exception as e:\n logger.error(f\"Error adding current date tool: {e}\")\n raise\n\n if not self.tools:\n msg = \"Tools are required to run the agent.\"\n logger.error(msg)\n raise ValueError(msg)\n\n try:\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n except Exception as e:\n logger.error(f\"Error setting up the agent: {e}\")\n raise\n\n return await self.run_agent(agent)\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n\n return await MemoryComponent().set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if isinstance(self.agent_llm, str):\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n return (\n self._build_llm_model(component_class, inputs, prefix),\n 
display_name,\n )\n except Exception as e:\n msg = f\"Error building {self.agent_llm} language model\"\n raise ValueError(msg) from e\n return self.agent_llm, None\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n 
\"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, field_name\n )\n return build_config\n" }, "handle_parsing_errors": { "_input_type": "BoolInput", From 2acd434e09e0206730f3d510e7ded760a8cd40ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sebasti=C3=A1n=20Est=C3=A9vez?= Date: Thu, 16 Jan 2025 15:54:34 -0500 Subject: [PATCH 17/22] feat: assistants agent improvements (#5581) * assistants agent improvements * remove alembic init file * vector store / file upload support * use sync file object (required by sdk) * steps * self.tools initialization * improvements for edwin * add name and switch to MultilineInput * ci fixes --- pyproject.toml | 2 +- src/backend/base/langflow/alembic/__init__.py | 0 src/backend/base/langflow/alembic/env.py | 1 + .../langflow/base/astra_assistants/util.py | 98 ++++++++ .../astra_assistant_manager.py | 237 +++++++++++++++--- uv.lock | 44 ++-- 6 files changed, 326 insertions(+), 56 deletions(-) delete mode 100644 src/backend/base/langflow/alembic/__init__.py diff --git a/pyproject.toml b/pyproject.toml index 929187ae19ec..247c408790a2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -75,7 +75,7 @@ dependencies = [ "langsmith==0.1.147", "yfinance==0.2.50", "wolframalpha==5.1.3", - "astra-assistants[tools]~=2.2.6", + "astra-assistants[tools]~=2.2.9", "composio-langchain==0.6.13", "composio-core==0.6.13", "spider-client==0.1.24", diff --git a/src/backend/base/langflow/alembic/__init__.py b/src/backend/base/langflow/alembic/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/src/backend/base/langflow/alembic/env.py b/src/backend/base/langflow/alembic/env.py index 8cf6002c42da..815904641f63 100644 --- a/src/backend/base/langflow/alembic/env.py +++ b/src/backend/base/langflow/alembic/env.py @@ -1,3 +1,4 @@ +# noqa: INP001 from logging.config import fileConfig from alembic import context diff --git a/src/backend/base/langflow/base/astra_assistants/util.py b/src/backend/base/langflow/base/astra_assistants/util.py index 13a79e01d6dc..2cde2d63b4fa 100644 --- a/src/backend/base/langflow/base/astra_assistants/util.py +++ b/src/backend/base/langflow/base/astra_assistants/util.py @@ -4,14 +4,20 @@ import os import pkgutil import threading +import uuid from json.decoder import JSONDecodeError +from pathlib import Path +from typing import Any import astra_assistants.tools as astra_assistants_tools import requests from astra_assistants import OpenAIWithDefaultKey, patch from astra_assistants.tools.tool_interface import ToolInterface +from langchain_core.tools import BaseTool 
+from pydantic import BaseModel
 from requests.exceptions import RequestException
 
+from langflow.components.tools.mcp_stdio import create_input_schema_from_json_schema
 from langflow.services.cache.utils import CacheMiss
 
 client_lock = threading.Lock()
@@ -64,3 +70,95 @@ def tools_from_package(your_package) -> None:
 
 
 tools_from_package(astra_assistants_tools)
+
+
+def wrap_base_tool_as_tool_interface(base_tool: BaseTool) -> ToolInterface:
+    """wrap_base_tool_as_tool_interface.
+
+    Wrap a BaseTool instance in a new class implementing ToolInterface,
+    building a dynamic Pydantic model from its args_schema (if any).
+    We only call `args_schema()` if it's truly a function/method,
+    avoiding accidental calls on a Pydantic model class (which is also callable).
+    """
+    raw_args_schema = getattr(base_tool, "args_schema", None)
+
+    # --- 1) Distinguish between a function/method vs. class/dict/None ---
+    if inspect.isfunction(raw_args_schema) or inspect.ismethod(raw_args_schema):
+        # It's actually a function -> call it once to get a class or dict
+        raw_args_schema = raw_args_schema()
+    # Otherwise, if it's a class or dict, do nothing here
+
+    # Now `raw_args_schema` might be:
+    #   - A Pydantic model class (subclass of BaseModel)
+    #   - A dict (JSON schema)
+    #   - None
+    #   - Something unexpected => raise error
+
+    # --- 2) Convert the schema or model class to a JSON schema dict ---
+    if raw_args_schema is None:
+        # No schema => minimal
+        schema_dict = {"type": "object", "properties": {}}
+
+    elif isinstance(raw_args_schema, dict):
+        # Already a JSON schema
+        schema_dict = raw_args_schema
+
+    elif inspect.isclass(raw_args_schema) and issubclass(raw_args_schema, BaseModel):
+        # It's a Pydantic model class -> convert to JSON schema
+        schema_dict = raw_args_schema.schema()
+
+    else:
+        msg = f"args_schema must be a Pydantic model class, a JSON schema dict, or None. Got: {raw_args_schema!r}"
+        raise TypeError(msg)
+
+    # --- 3) Build our dynamic Pydantic model from the JSON schema ---
+    InputSchema: type[BaseModel] = create_input_schema_from_json_schema(schema_dict)  # noqa: N806
+
+    # --- 4) Define a wrapper class that uses composition ---
+    class WrappedDynamicTool(ToolInterface):
+        """WrappedDynamicTool.
+
+        Uses composition to delegate logic to the original base_tool,
+        but sets `call(..., arguments: InputSchema)` so we have a real model.
+ """ + + def __init__(self, tool: BaseTool): + self._tool = tool + + def call(self, arguments: InputSchema) -> dict: # type: ignore # noqa: PGH003 + output = self._tool.invoke(arguments.dict()) # type: ignore # noqa: PGH003 + result = "" + if "error" in output[0].data: + result = output[0].data["error"] + elif "result" in output[0].data: + result = output[0].data["result"] + return {"cache_id": str(uuid.uuid4()), "output": result} + + def run(self, tool_input: Any) -> str: + return self._tool.run(tool_input) + + def name(self) -> str: + """Return the base tool's name if it exists.""" + if hasattr(self._tool, "name"): + return str(self._tool.name) + return super().name() + + def to_function(self): + """Incorporate the base tool's description if present.""" + params = InputSchema.schema() + description = getattr(self._tool, "description", "A dynamically wrapped tool") + return { + "type": "function", + "function": {"name": self.name(), "description": description, "parameters": params}, + } + + # Return an instance of our newly minted class + return WrappedDynamicTool(base_tool) + + +def sync_upload(file_path, client): + with Path(file_path).open("rb") as sync_file_handle: + return client.files.create( + file=sync_file_handle, # Pass the sync file handle + purpose="assistants", + ) diff --git a/src/backend/base/langflow/components/astra_assistants/astra_assistant_manager.py b/src/backend/base/langflow/components/astra_assistants/astra_assistant_manager.py index 989d0162ecfa..74be041f9f8a 100644 --- a/src/backend/base/langflow/components/astra_assistants/astra_assistant_manager.py +++ b/src/backend/base/langflow/components/astra_assistants/astra_assistant_manager.py @@ -1,70 +1,145 @@ import asyncio +from asyncio import to_thread +from typing import TYPE_CHECKING, Any, cast from astra_assistants.astra_assistants_manager import AssistantManager +from langchain_core.agents import AgentFinish from loguru import logger +from langflow.base.agents.events import ExceptionWithMessageError, process_agent_events from langflow.base.astra_assistants.util import ( get_patched_openai_client, litellm_model_names, - tool_names, - tools_and_names, + sync_upload, + wrap_base_tool_as_tool_interface, ) from langflow.custom.custom_component.component_with_cache import ComponentWithCache -from langflow.inputs import DropdownInput, MultilineInput, StrInput +from langflow.inputs import DropdownInput, FileInput, HandleInput, MultilineInput +from langflow.memory import delete_message +from langflow.schema.content_block import ContentBlock from langflow.schema.message import Message from langflow.template import Output +from langflow.utils.constants import MESSAGE_SENDER_AI + +if TYPE_CHECKING: + from langflow.schema.log import SendMessageFunctionType class AstraAssistantManager(ComponentWithCache): - display_name = "Astra Assistant Manager" + display_name = "Astra Assistant Agent" + name = "Astra Assistant Agent" description = "Manages Assistant Interactions" icon = "AstraDB" inputs = [ - StrInput( - name="instructions", - display_name="Instructions", - info="Instructions for the assistant, think of these as the system prompt.", - ), DropdownInput( name="model_name", - display_name="Model Name", + display_name="Model", advanced=False, options=litellm_model_names, value="gpt-4o-mini", ), - DropdownInput( - display_name="Tool", - name="tool", - options=tool_names, + MultilineInput( + name="instructions", + display_name="Agent Instructions", + info="Instructions for the assistant, think of these as the system prompt.", + 
), + HandleInput( + name="input_tools", + display_name="Tools", + input_types=["Tool"], + is_list=True, + required=False, + info="These are the tools that the agent can use to help with tasks.", ), + # DropdownInput( + # display_name="Tools", + # name="tool", + # options=tool_names, + # ), MultilineInput( - name="user_message", - display_name="User Message", - info="User message to pass to the run.", + name="user_message", display_name="User Message", info="User message to pass to the run.", tool_mode=True + ), + FileInput( + name="file", + display_name="File(s) for retrieval", + list=True, + info="Files to be sent with the message.", + required=False, + show=True, + file_types=[ + "txt", + "md", + "mdx", + "csv", + "json", + "yaml", + "yml", + "xml", + "html", + "htm", + "pdf", + "docx", + "py", + "sh", + "sql", + "js", + "ts", + "tsx", + "jpg", + "jpeg", + "png", + "bmp", + "image", + "zip", + "tar", + "tgz", + "bz2", + "gz", + "c", + "cpp", + "cs", + "css", + "go", + "java", + "php", + "rb", + "tex", + "doc", + "docx", + "ppt", + "pptx", + "xls", + "xlsx", + "jsonl", + ], ), MultilineInput( name="input_thread_id", display_name="Thread ID (optional)", info="ID of the thread", + advanced=True, ), MultilineInput( name="input_assistant_id", display_name="Assistant ID (optional)", info="ID of the assistant", + advanced=True, ), MultilineInput( name="env_set", display_name="Environment Set", info="Dummy input to allow chaining with Dotenv Component.", + advanced=True, ), ] outputs = [ Output(display_name="Assistant Response", name="assistant_response", method="get_assistant_response"), - Output(display_name="Tool output", name="tool_output", method="get_tool_output"), - Output(display_name="Thread Id", name="output_thread_id", method="get_thread_id"), - Output(display_name="Assistant Id", name="output_assistant_id", method="get_assistant_id"), + Output(display_name="Tool output", name="tool_output", method="get_tool_output", hidden=True), + Output(display_name="Thread Id", name="output_thread_id", method="get_thread_id", hidden=True), + Output(display_name="Assistant Id", name="output_assistant_id", method="get_assistant_id", hidden=True), + Output(display_name="Vector Store Id", name="output_vs_id", method="get_vs_id", hidden=True), ] def __init__(self, **kwargs) -> None: @@ -75,22 +150,33 @@ def __init__(self, **kwargs) -> None: self._tool_output: Message = None # type: ignore[assignment] self._thread_id: Message = None # type: ignore[assignment] self._assistant_id: Message = None # type: ignore[assignment] + self._vs_id: Message = None # type: ignore[assignment] self.client = get_patched_openai_client(self._shared_component_cache) + self.input_tools: list[Any] async def get_assistant_response(self) -> Message: await self.initialize() + self.status = self._assistant_response return self._assistant_response + async def get_vs_id(self) -> Message: + await self.initialize() + self.status = self._vs_id + return self._vs_id + async def get_tool_output(self) -> Message: await self.initialize() + self.status = self._tool_output return self._tool_output async def get_thread_id(self) -> Message: await self.initialize() + self.status = self._thread_id return self._thread_id async def get_assistant_id(self) -> Message: await self.initialize() + self.status = self._assistant_id return self._assistant_id async def initialize(self) -> None: @@ -101,19 +187,37 @@ async def initialize(self) -> None: async def process_inputs(self) -> None: logger.info(f"env_set is {self.env_set}") - logger.info(self.tool) + 
logger.info(self.input_tools) tools = [] tool_obj = None - if self.tool: - tool_cls = tools_and_names[self.tool] - tool_obj = tool_cls() + if self.input_tools is None: + self.input_tools = [] + for tool in self.input_tools: + tool_obj = wrap_base_tool_as_tool_interface(tool) tools.append(tool_obj) + assistant_id = None thread_id = None if self.input_assistant_id: assistant_id = self.input_assistant_id if self.input_thread_id: thread_id = self.input_thread_id + + if hasattr(self, "graph"): + session_id = self.graph.session_id + elif hasattr(self, "_session_id"): + session_id = self._session_id + else: + session_id = None + + agent_message = Message( + sender=MESSAGE_SENDER_AI, + sender_name=self.display_name or "Astra Assistant", + properties={"icon": "Bot", "state": "partial"}, + content_blocks=[ContentBlock(title="Assistant Steps", contents=[])], + session_id=session_id, + ) + assistant_manager = AssistantManager( instructions=self.instructions, model=self.model_name, @@ -124,12 +228,79 @@ async def process_inputs(self) -> None: assistant_id=assistant_id, ) - content = self.user_message - result = await assistant_manager.run_thread(content=content, tool=tool_obj) - self._assistant_response = Message(text=result["text"]) - if "decision" in result: - self._tool_output = Message(text=str(result["decision"].is_complete)) - else: - self._tool_output = Message(text=result["text"]) - self._thread_id = Message(text=assistant_manager.thread.id) - self._assistant_id = Message(text=assistant_manager.assistant.id) + if self.file: + file = await to_thread(sync_upload, self.file, assistant_manager.client) + vector_store = assistant_manager.client.beta.vector_stores.create(name="my_vs", file_ids=[file.id]) + assistant_tools = assistant_manager.assistant.tools + assistant_tools += [{"type": "file_search"}] + assistant = assistant_manager.client.beta.assistants.update( + assistant_manager.assistant.id, + tools=assistant_tools, + tool_resources={"file_search": {"vector_store_ids": [vector_store.id]}}, + ) + assistant_manager.assistant = assistant + + async def step_iterator(): + # Initial event + yield {"event": "on_chain_start", "name": "AstraAssistant", "data": {"input": {"text": self.user_message}}} + + content = self.user_message + result = await assistant_manager.run_thread(content=content, tool=tool_obj) + + # Tool usage if present + if "output" in result and "arguments" in result: + yield {"event": "on_tool_start", "name": "tool", "data": {"input": {"text": str(result["arguments"])}}} + yield {"event": "on_tool_end", "name": "tool", "data": {"output": result["output"]}} + + if "file_search" in result and result["file_search"] is not None: + yield {"event": "on_tool_start", "name": "tool", "data": {"input": {"text": self.user_message}}} + file_search_str = "" + for chunk in result["file_search"].to_dict().get("chunks", []): + file_search_str += f"## Chunk ID: `{chunk['chunk_id']}`\n" + file_search_str += f"**Content:**\n\n```\n{chunk['content']}\n```\n\n" + if "score" in chunk: + file_search_str += f"**Score:** {chunk['score']}\n\n" + if "file_id" in chunk: + file_search_str += f"**File ID:** `{chunk['file_id']}`\n\n" + if "file_name" in chunk: + file_search_str += f"**File Name:** `{chunk['file_name']}`\n\n" + if "bytes" in chunk: + file_search_str += f"**Bytes:** {chunk['bytes']}\n\n" + if "search_string" in chunk: + file_search_str += f"**Search String:** {chunk['search_string']}\n\n" + yield {"event": "on_tool_end", "name": "tool", "data": {"output": file_search_str}} + + if "text" not in result: 
+ msg = f"No text in result, {result}" + raise ValueError(msg) + + self._assistant_response = Message(text=result["text"]) + if "decision" in result: + self._tool_output = Message(text=str(result["decision"].is_complete)) + else: + self._tool_output = Message(text=result["text"]) + self._thread_id = Message(text=assistant_manager.thread.id) + self._assistant_id = Message(text=assistant_manager.assistant.id) + + # Final event - format it like AgentFinish to match the expected format + yield { + "event": "on_chain_end", + "name": "AstraAssistant", + "data": {"output": AgentFinish(return_values={"output": result["text"]}, log="")}, + } + + try: + if hasattr(self, "send_message"): + processed_result = await process_agent_events( + step_iterator(), + agent_message, + cast("SendMessageFunctionType", self.send_message), + ) + self.status = processed_result + except ExceptionWithMessageError as e: + msg_id = e.agent_message.id + await delete_message(id_=msg_id) + await self._send_message_event(e.agent_message, category="remove_message") + raise + except Exception: + raise diff --git a/uv.lock b/uv.lock index d50afab6e1a4..02715dd2f274 100644 --- a/uv.lock +++ b/uv.lock @@ -324,7 +324,7 @@ wheels = [ [[package]] name = "astra-assistants" -version = "2.2.7" +version = "2.2.9" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp" }, @@ -339,9 +339,9 @@ dependencies = [ { name = "tree-sitter" }, { name = "tree-sitter-python" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/81/e2/c440ba3fe475088537c7c258b2f7689b1c9724bf46cd62d7de77e3ddc79f/astra_assistants-2.2.7.tar.gz", hash = "sha256:dd88adad9a74c9839c6faade1ccdfb47b827c82f2ed2a6da92731b4190506774", size = 67687 } +sdist = { url = "https://files.pythonhosted.org/packages/05/88/37b7ba47e7e639588a9068bfc90b4f3cbd964a8a2f4153e69b40e2165648/astra_assistants-2.2.9.tar.gz", hash = "sha256:b33e6a31d08155917e6b5413f986c278efcaa8e1c5a03ca1563e92ca0130a807", size = 67838 } wheels = [ - { url = "https://files.pythonhosted.org/packages/25/61/8a165e4ed492dae66d278d485eb505689273f2a266e34e2a21bac0bec4a6/astra_assistants-2.2.7-py3-none-any.whl", hash = "sha256:2d12999f97f57a45f24c3236af7b8792de317584e58cc7eaf771e5a440a26f2d", size = 78374 }, + { url = "https://files.pythonhosted.org/packages/f3/32/30a69010077a71ef5fd80c71296b2976946b140e8274ff37b44552c54ef4/astra_assistants-2.2.9-py3-none-any.whl", hash = "sha256:b5b2713cd32ac2050e4f28b1d748cb4701c021d802912837b943c65861699a4e", size = 78527 }, ] [package.optional-dependencies] @@ -532,7 +532,7 @@ name = "blessed" version = "1.20.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "jinxed", marker = "platform_system == 'Windows'" }, + { name = "jinxed", marker = "sys_platform == 'win32'" }, { name = "six" }, { name = "wcwidth" }, ] @@ -954,7 +954,7 @@ name = "click" version = "8.1.8" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "colorama", marker = "platform_system == 'Windows'" }, + { name = "colorama", marker = "sys_platform == 'win32'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593 } wheels = [ @@ -3156,7 +3156,7 @@ name = "ipykernel" version = "6.29.5" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "appnope", marker = "platform_system == 'Darwin'" }, + { name = "appnope", marker 
= "sys_platform == 'darwin'" }, { name = "comm" }, { name = "debugpy" }, { name = "ipython" }, @@ -3247,7 +3247,7 @@ name = "jinxed" version = "1.3.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "ansicon", marker = "platform_system == 'Windows'" }, + { name = "ansicon", marker = "sys_platform == 'win32'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/20/d0/59b2b80e7a52d255f9e0ad040d2e826342d05580c4b1d7d7747cfb8db731/jinxed-1.3.0.tar.gz", hash = "sha256:1593124b18a41b7a3da3b078471442e51dbad3d77b4d4f2b0c26ab6f7d660dbf", size = 80981 } wheels = [ @@ -4094,7 +4094,7 @@ requires-dist = [ { name = "aiofile", specifier = ">=3.9.0,<4.0.0" }, { name = "arize-phoenix-otel", specifier = ">=0.6.1" }, { name = "assemblyai", specifier = "==0.35.1" }, - { name = "astra-assistants", extras = ["tools"], specifier = "~=2.2.6" }, + { name = "astra-assistants", extras = ["tools"], specifier = "~=2.2.9" }, { name = "atlassian-python-api", specifier = "==3.41.16" }, { name = "beautifulsoup4", specifier = "==4.12.3" }, { name = "boto3", specifier = "==1.34.162" }, @@ -6164,7 +6164,7 @@ name = "portalocker" version = "2.10.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "pywin32", marker = "platform_system == 'Windows'" }, + { name = "pywin32", marker = "sys_platform == 'win32'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/ed/d3/c6c64067759e87af98cc668c1cc75171347d0f1577fab7ca3749134e3cd4/portalocker-2.10.1.tar.gz", hash = "sha256:ef1bf844e878ab08aee7e40184156e1151f228f103aa5c6bd0724cc330960f8f", size = 40891 } wheels = [ @@ -8661,19 +8661,19 @@ dependencies = [ { name = "fsspec" }, { name = "jinja2" }, { name = "networkx" }, - { name = "nvidia-cublas-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, - { name = "nvidia-cuda-cupti-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, - { name = "nvidia-cuda-nvrtc-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, - { name = "nvidia-cuda-runtime-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, - { name = "nvidia-cudnn-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, - { name = "nvidia-cufft-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, - { name = "nvidia-curand-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, - { name = "nvidia-cusolver-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, - { name = "nvidia-cusparse-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, - { name = "nvidia-nccl-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, - { name = "nvidia-nvtx-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, + { name = "nvidia-cublas-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cuda-cupti-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cuda-nvrtc-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cuda-runtime-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cudnn-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cufft-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, 
+ { name = "nvidia-curand-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cusolver-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cusparse-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-nccl-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-nvtx-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, { name = "sympy" }, - { name = "triton", marker = "python_full_version < '3.13' and platform_machine == 'x86_64' and platform_system == 'Linux'" }, + { name = "triton", marker = "python_full_version < '3.13' and platform_machine == 'x86_64' and sys_platform == 'linux'" }, { name = "typing-extensions" }, ] wheels = [ @@ -8714,7 +8714,7 @@ name = "tqdm" version = "4.67.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "colorama", marker = "platform_system == 'Windows'" }, + { name = "colorama", marker = "sys_platform == 'win32'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737 } wheels = [ From e1b5c70fa351e06536d250ee3528240b73b02bcc Mon Sep 17 00:00:00 2001 From: Cristhian Zanforlin Lousa Date: Thu, 16 Jan 2025 18:11:07 -0300 Subject: [PATCH 18/22] refactor: enhance flow type safety and clean up unused code (#5669) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * 📝 (use-save-flow.ts): add AllNodeType and EdgeType imports to improve type safety in useSaveFlow hook 📝 (index.tsx): remove unused setNoticeData function to clean up code and improve readability * refactor: Remove unused code in GeneralPage component * refactor: Remove unused code in cardComponent/index.tsx --------- Co-authored-by: anovazzi1 --- src/frontend/src/components/core/cardComponent/index.tsx | 1 - src/frontend/src/hooks/flows/use-save-flow.ts | 7 +++++-- .../src/pages/FlowPage/components/PageComponent/index.tsx | 1 - .../src/pages/SettingsPage/pages/GeneralPage/index.tsx | 7 +------ 4 files changed, 6 insertions(+), 10 deletions(-) diff --git a/src/frontend/src/components/core/cardComponent/index.tsx b/src/frontend/src/components/core/cardComponent/index.tsx index 231602533276..6e446fd6ce05 100644 --- a/src/frontend/src/components/core/cardComponent/index.tsx +++ b/src/frontend/src/components/core/cardComponent/index.tsx @@ -18,7 +18,6 @@ import { } from "../../ui/card"; import { Checkbox } from "../../ui/checkbox"; import { FormControl, FormField } from "../../ui/form"; -import Loading from "../../ui/loading"; import useDragStart from "./hooks/use-on-drag-start"; import { convertTestName } from "./utils/convert-test-name"; diff --git a/src/frontend/src/hooks/flows/use-save-flow.ts b/src/frontend/src/hooks/flows/use-save-flow.ts index df08aad5a7a7..e4a9d36ede82 100644 --- a/src/frontend/src/hooks/flows/use-save-flow.ts +++ b/src/frontend/src/hooks/flows/use-save-flow.ts @@ -3,7 +3,7 @@ import { usePatchUpdateFlow } from "@/controllers/API/queries/flows/use-patch-up import useAlertStore from "@/stores/alertStore"; import useFlowsManagerStore from "@/stores/flowsManagerStore"; import useFlowStore from "@/stores/flowStore"; -import { FlowType } from "@/types/flow"; +import { AllNodeType, EdgeType, FlowType } from "@/types/flow"; import { 
customStringify } from "@/utils/reactflowUtils"; import { ReactFlowJsonObject } from "@xyflow/react"; @@ -52,7 +52,10 @@ const useSaveFlow = () => { { id: flow!.id }, { onSuccess: (flowResponse) => { - flow!.data = flowResponse.data as ReactFlowJsonObject; + flow!.data = flowResponse.data as ReactFlowJsonObject< + AllNodeType, + EdgeType + >; }, }, ); diff --git a/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx b/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx index d5f6d2c3e83b..80001245413f 100644 --- a/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx +++ b/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx @@ -115,7 +115,6 @@ export default function Page({ view }: { view?: boolean }): JSX.Element { ); const onConnect = useFlowStore((state) => state.onConnect); const setErrorData = useAlertStore((state) => state.setErrorData); - const setNoticeData = useAlertStore((state) => state.setNoticeData); const updateCurrentFlow = useFlowStore((state) => state.updateCurrentFlow); const [selectionMenuVisible, setSelectionMenuVisible] = useState(false); const edgeUpdateSuccessful = useRef(true); diff --git a/src/frontend/src/pages/SettingsPage/pages/GeneralPage/index.tsx b/src/frontend/src/pages/SettingsPage/pages/GeneralPage/index.tsx index 2f6546201506..719c551746d2 100644 --- a/src/frontend/src/pages/SettingsPage/pages/GeneralPage/index.tsx +++ b/src/frontend/src/pages/SettingsPage/pages/GeneralPage/index.tsx @@ -24,7 +24,6 @@ import { patchUserInputStateType, } from "../../../../types/components"; import useScrollToElement from "../hooks/use-scroll-to-element"; -import StoreApiKeyFormComponent from "../StoreApiKeyPage/components/StoreApiKeyForm"; import GeneralPageHeaderComponent from "./components/GeneralPageHeader"; import PasswordFormComponent from "./components/PasswordForm"; import ProfilePictureFormComponent from "./components/ProfilePictureForm"; @@ -39,11 +38,7 @@ export const GeneralPage = () => { const setSuccessData = useAlertStore((state) => state.setSuccessData); const setErrorData = useAlertStore((state) => state.setErrorData); const { userData, setUserData } = useContext(AuthContext); - const hasStore = useStoreStore((state) => state.hasStore); - const validApiKey = useStoreStore((state) => state.validApiKey); - const hasApiKey = useStoreStore((state) => state.hasApiKey); - const loadingApiKey = useStoreStore((state) => state.loadingApiKey); - const { password, cnfPassword, profilePicture, apikey } = inputState; + const { password, cnfPassword, profilePicture } = inputState; const autoLogin = useAuthStore((state) => state.autoLogin); const { storeApiKey } = useContext(AuthContext); From c51e57c7deacfd99a57623ed9f450e0497b8ddc1 Mon Sep 17 00:00:00 2001 From: VICTOR CORREA GOMES <112295415+Vigtu@users.noreply.github.com> Date: Thu, 16 Jan 2025 18:26:09 -0300 Subject: [PATCH 19/22] feat: Add `required=True` to essential inputs across Langflow components (#5739) * fix: add required validation to input fields Ensures mandatory fields are properly marked as required across components. * fix: add required validation to input fields Ensures mandatory fields are properly marked as required across components. 
* fix: add required validation to input fields field: model_name * fix: add required validation to input fields field: model and base_url * fix: add required validation to input fields input: mistral_api_key * fix: add required validation to input fields inputs: model, base_url, nvidia_api_key * fix: add required validation to input fields inputs: model, base_url * fix: add required validation to input fields input: openai_api_key * fix: add required validation to input fields inputs: message, embedding_model * fix: add required validation to input fields inputs: model_name, credentials * fix: add required validation to input fields inputs: aws_secret_access_key, aws_access_key_id * fix: add required validation to input fields inputs: input_text, match_text * fix: add required validation to input fields inputs: input_message * fix: add required validation to input fields inputs: input_value * fix: add required validation to input fields input: data_input * fix: add required validation to input fields inputs: input_value * fix: add required validation to input fields input: data_input * fix: add required validation to input fields input: data_input * fix: add required validation to input fields input: data_input * fix: add required validation to input fields input: data_input * fix: add required validation to input fields inputs: data_inputs, embeddings * fix: add required validation to input fields inputs: api_key, input_value * fix: add required validation to input fields inputs: password, username, openai_api_key, prompt * fix: add required validation to input fields inputs: api_key, transcription_result * fix: add required validation to input fields inputs: api_key, transcription_result, prompt * fix: add required validation to input fields input: prompt * fix: add required validation to input fields input: api_key * fix: add required validation to input fields inputs: api_key, transcript_id * fix: add required validation to input fields inputs: audio_file, api_key * [autofix.ci] apply automated fixes * [autofix.ci] apply automated fixes (attempt 2/3) --------- Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- .../assemblyai/assemblyai_get_subtitles.py | 2 ++ .../components/assemblyai/assemblyai_lemur.py | 8 +++---- .../assemblyai/assemblyai_list_transcripts.py | 1 + .../assemblyai/assemblyai_poll_transcript.py | 2 ++ .../assemblyai/assemblyai_start_transcript.py | 2 ++ .../components/embeddings/amazon_bedrock.py | 2 ++ .../embeddings/google_generative_ai.py | 2 +- .../embeddings/huggingface_inference_api.py | 1 + .../embeddings/lmstudioembeddings.py | 2 ++ .../langflow/components/embeddings/mistral.py | 2 +- .../langflow/components/embeddings/nvidia.py | 3 +++ .../langflow/components/embeddings/ollama.py | 2 ++ .../langflow/components/embeddings/openai.py | 2 +- .../components/embeddings/similarity.py | 1 + .../components/embeddings/text_embedder.py | 2 ++ .../components/embeddings/vertexai.py | 3 ++- .../components/helpers/structured_output.py | 1 + .../icosacomputing/combinatorial_reasoner.py | 5 +++- .../langchain_utilities/character.py | 1 + .../langchain_utilities/csv_agent.py | 1 + .../html_link_extractor.py | 1 + .../langchain_utilities/language_recursive.py | 1 + .../langchain_utilities/language_semantic.py | 2 ++ .../langchain_utilities/natural_language.py | 1 + .../recursive_character.py | 1 + .../components/logic/conditional_router.py | 2 ++ .../langflow/components/logic/pass_message.py | 1 + .../components/notdiamond/notdiamond.py | 3 
++- .../Graph Vector Store RAG.json | 24 +++++++++++-------- .../starter_projects/Vector Store RAG.json | 16 ++++++++----- 30 files changed, 70 insertions(+), 27 deletions(-) diff --git a/src/backend/base/langflow/components/assemblyai/assemblyai_get_subtitles.py b/src/backend/base/langflow/components/assemblyai/assemblyai_get_subtitles.py index 687461442cdf..12dfb5624f13 100644 --- a/src/backend/base/langflow/components/assemblyai/assemblyai_get_subtitles.py +++ b/src/backend/base/langflow/components/assemblyai/assemblyai_get_subtitles.py @@ -17,11 +17,13 @@ class AssemblyAIGetSubtitles(Component): name="api_key", display_name="Assembly API Key", info="Your AssemblyAI API key. You can get one from https://www.assemblyai.com/", + required=True, ), DataInput( name="transcription_result", display_name="Transcription Result", info="The transcription result from AssemblyAI", + required=True, ), DropdownInput( name="subtitle_format", diff --git a/src/backend/base/langflow/components/assemblyai/assemblyai_lemur.py b/src/backend/base/langflow/components/assemblyai/assemblyai_lemur.py index a0d357bcb260..059914cda4b8 100644 --- a/src/backend/base/langflow/components/assemblyai/assemblyai_lemur.py +++ b/src/backend/base/langflow/components/assemblyai/assemblyai_lemur.py @@ -18,17 +18,15 @@ class AssemblyAILeMUR(Component): display_name="Assembly API Key", info="Your AssemblyAI API key. You can get one from https://www.assemblyai.com/", advanced=False, + required=True, ), DataInput( name="transcription_result", display_name="Transcription Result", info="The transcription result from AssemblyAI", + required=True, ), - MultilineInput( - name="prompt", - display_name="Input Prompt", - info="The text to prompt the model", - ), + MultilineInput(name="prompt", display_name="Input Prompt", info="The text to prompt the model", required=True), DropdownInput( name="final_model", display_name="Final Model", diff --git a/src/backend/base/langflow/components/assemblyai/assemblyai_list_transcripts.py b/src/backend/base/langflow/components/assemblyai/assemblyai_list_transcripts.py index de96112bb0f1..369b85de247a 100644 --- a/src/backend/base/langflow/components/assemblyai/assemblyai_list_transcripts.py +++ b/src/backend/base/langflow/components/assemblyai/assemblyai_list_transcripts.py @@ -17,6 +17,7 @@ class AssemblyAIListTranscripts(Component): name="api_key", display_name="Assembly API Key", info="Your AssemblyAI API key. You can get one from https://www.assemblyai.com/", + required=True, ), IntInput( name="limit", diff --git a/src/backend/base/langflow/components/assemblyai/assemblyai_poll_transcript.py b/src/backend/base/langflow/components/assemblyai/assemblyai_poll_transcript.py index 13d01e5daa06..f2ab67839358 100644 --- a/src/backend/base/langflow/components/assemblyai/assemblyai_poll_transcript.py +++ b/src/backend/base/langflow/components/assemblyai/assemblyai_poll_transcript.py @@ -18,11 +18,13 @@ class AssemblyAITranscriptionJobPoller(Component): name="api_key", display_name="Assembly API Key", info="Your AssemblyAI API key. 
You can get one from https://www.assemblyai.com/", + required=True, ), DataInput( name="transcript_id", display_name="Transcript ID", info="The ID of the transcription job to poll", + required=True, ), FloatInput( name="polling_interval", diff --git a/src/backend/base/langflow/components/assemblyai/assemblyai_start_transcript.py b/src/backend/base/langflow/components/assemblyai/assemblyai_start_transcript.py index de83a59e3d76..48cf11ef51e8 100644 --- a/src/backend/base/langflow/components/assemblyai/assemblyai_start_transcript.py +++ b/src/backend/base/langflow/components/assemblyai/assemblyai_start_transcript.py @@ -19,6 +19,7 @@ class AssemblyAITranscriptionJobCreator(Component): name="api_key", display_name="Assembly API Key", info="Your AssemblyAI API key. You can get one from https://www.assemblyai.com/", + required=True, ), FileInput( name="audio_file", @@ -65,6 +66,7 @@ class AssemblyAITranscriptionJobCreator(Component): "mxf", ], info="The audio file to transcribe", + required=True, ), MessageTextInput( name="audio_file_url", diff --git a/src/backend/base/langflow/components/embeddings/amazon_bedrock.py b/src/backend/base/langflow/components/embeddings/amazon_bedrock.py index caeafc91aa1a..266f6b11fe7d 100644 --- a/src/backend/base/langflow/components/embeddings/amazon_bedrock.py +++ b/src/backend/base/langflow/components/embeddings/amazon_bedrock.py @@ -24,6 +24,7 @@ class AmazonBedrockEmbeddingsComponent(LCModelComponent): info="The access key for your AWS account." "Usually set in Python code as the environment variable 'AWS_ACCESS_KEY_ID'.", value="AWS_ACCESS_KEY_ID", + required=True, ), SecretStrInput( name="aws_secret_access_key", @@ -31,6 +32,7 @@ class AmazonBedrockEmbeddingsComponent(LCModelComponent): info="The secret key for your AWS account. 
" "Usually set in Python code as the environment variable 'AWS_SECRET_ACCESS_KEY'.", value="AWS_SECRET_ACCESS_KEY", + required=True, ), SecretStrInput( name="aws_session_token", diff --git a/src/backend/base/langflow/components/embeddings/google_generative_ai.py b/src/backend/base/langflow/components/embeddings/google_generative_ai.py index 8c27561ed7f9..94a3c5a43830 100644 --- a/src/backend/base/langflow/components/embeddings/google_generative_ai.py +++ b/src/backend/base/langflow/components/embeddings/google_generative_ai.py @@ -21,7 +21,7 @@ class GoogleGenerativeAIEmbeddingsComponent(Component): name = "Google Generative AI Embeddings" inputs = [ - SecretStrInput(name="api_key", display_name="API Key"), + SecretStrInput(name="api_key", display_name="API Key", required=True), MessageTextInput(name="model_name", display_name="Model Name", value="models/text-embedding-004"), ] diff --git a/src/backend/base/langflow/components/embeddings/huggingface_inference_api.py b/src/backend/base/langflow/components/embeddings/huggingface_inference_api.py index 1338b125b734..34c426ac0b73 100644 --- a/src/backend/base/langflow/components/embeddings/huggingface_inference_api.py +++ b/src/backend/base/langflow/components/embeddings/huggingface_inference_api.py @@ -36,6 +36,7 @@ class HuggingFaceInferenceAPIEmbeddingsComponent(LCEmbeddingsModel): display_name="Model Name", value="BAAI/bge-large-en-v1.5", info="The name of the model to use for text embeddings.", + required=True, ), ] diff --git a/src/backend/base/langflow/components/embeddings/lmstudioembeddings.py b/src/backend/base/langflow/components/embeddings/lmstudioembeddings.py index 11ffe02305c3..57f783fc1ff7 100644 --- a/src/backend/base/langflow/components/embeddings/lmstudioembeddings.py +++ b/src/backend/base/langflow/components/embeddings/lmstudioembeddings.py @@ -49,12 +49,14 @@ async def get_model(base_url_value: str) -> list[str]: display_name="Model", advanced=False, refresh_button=True, + required=True, ), MessageTextInput( name="base_url", display_name="LM Studio Base URL", refresh_button=True, value="http://localhost:1234/v1", + required=True, ), SecretStrInput( name="api_key", diff --git a/src/backend/base/langflow/components/embeddings/mistral.py b/src/backend/base/langflow/components/embeddings/mistral.py index 7aaec00b3f9a..e183d0165235 100644 --- a/src/backend/base/langflow/components/embeddings/mistral.py +++ b/src/backend/base/langflow/components/embeddings/mistral.py @@ -20,7 +20,7 @@ class MistralAIEmbeddingsComponent(LCModelComponent): options=["mistral-embed"], value="mistral-embed", ), - SecretStrInput(name="mistral_api_key", display_name="Mistral API Key"), + SecretStrInput(name="mistral_api_key", display_name="Mistral API Key", required=True), IntInput( name="max_concurrent_requests", display_name="Max Concurrent Requests", diff --git a/src/backend/base/langflow/components/embeddings/nvidia.py b/src/backend/base/langflow/components/embeddings/nvidia.py index 1aca0a33df0f..302fd8300a36 100644 --- a/src/backend/base/langflow/components/embeddings/nvidia.py +++ b/src/backend/base/langflow/components/embeddings/nvidia.py @@ -21,12 +21,14 @@ class NVIDIAEmbeddingsComponent(LCEmbeddingsModel): "snowflake/arctic-embed-I", ], value="nvidia/nv-embed-v1", + required=True, ), MessageTextInput( name="base_url", display_name="NVIDIA Base URL", refresh_button=True, value="https://integrate.api.nvidia.com/v1", + required=True, ), SecretStrInput( name="nvidia_api_key", @@ -34,6 +36,7 @@ class NVIDIAEmbeddingsComponent(LCEmbeddingsModel): 
info="The NVIDIA API Key.", advanced=False, value="NVIDIA_API_KEY", + required=True, ), FloatInput( name="temperature", diff --git a/src/backend/base/langflow/components/embeddings/ollama.py b/src/backend/base/langflow/components/embeddings/ollama.py index e5c83ef9e2ee..f3e9e9051510 100644 --- a/src/backend/base/langflow/components/embeddings/ollama.py +++ b/src/backend/base/langflow/components/embeddings/ollama.py @@ -17,11 +17,13 @@ class OllamaEmbeddingsComponent(LCModelComponent): name="model", display_name="Ollama Model", value="nomic-embed-text", + required=True, ), MessageTextInput( name="base_url", display_name="Ollama Base URL", value="http://localhost:11434", + required=True, ), ] diff --git a/src/backend/base/langflow/components/embeddings/openai.py b/src/backend/base/langflow/components/embeddings/openai.py index 6c075b97d6e2..e4ae4e6f54cf 100644 --- a/src/backend/base/langflow/components/embeddings/openai.py +++ b/src/backend/base/langflow/components/embeddings/openai.py @@ -38,7 +38,7 @@ class OpenAIEmbeddingsComponent(LCEmbeddingsModel): value="text-embedding-3-small", ), DictInput(name="model_kwargs", display_name="Model Kwargs", advanced=True), - SecretStrInput(name="openai_api_key", display_name="OpenAI API Key", value="OPENAI_API_KEY"), + SecretStrInput(name="openai_api_key", display_name="OpenAI API Key", value="OPENAI_API_KEY", required=True), MessageTextInput(name="openai_api_base", display_name="OpenAI API Base", advanced=True), MessageTextInput(name="openai_api_type", display_name="OpenAI API Type", advanced=True), MessageTextInput(name="openai_api_version", display_name="OpenAI API Version", advanced=True), diff --git a/src/backend/base/langflow/components/embeddings/similarity.py b/src/backend/base/langflow/components/embeddings/similarity.py index 914943edb5c7..7913ab3fd56c 100644 --- a/src/backend/base/langflow/components/embeddings/similarity.py +++ b/src/backend/base/langflow/components/embeddings/similarity.py @@ -16,6 +16,7 @@ class EmbeddingSimilarityComponent(Component): display_name="Embedding Vectors", info="A list containing exactly two data objects with embedding vectors to compare.", is_list=True, + required=True, ), DropdownInput( name="similarity_metric", diff --git a/src/backend/base/langflow/components/embeddings/text_embedder.py b/src/backend/base/langflow/components/embeddings/text_embedder.py index d9a40e2ec285..5c66a9372ebe 100644 --- a/src/backend/base/langflow/components/embeddings/text_embedder.py +++ b/src/backend/base/langflow/components/embeddings/text_embedder.py @@ -20,11 +20,13 @@ class TextEmbedderComponent(Component): display_name="Embedding Model", info="The embedding model to use for generating embeddings.", input_types=["Embeddings"], + required=True, ), MessageInput( name="message", display_name="Message", info="The message to generate embeddings for.", + required=True, ), ] outputs = [ diff --git a/src/backend/base/langflow/components/embeddings/vertexai.py b/src/backend/base/langflow/components/embeddings/vertexai.py index 6c74f265134e..026dd5d41a15 100644 --- a/src/backend/base/langflow/components/embeddings/vertexai.py +++ b/src/backend/base/langflow/components/embeddings/vertexai.py @@ -16,12 +16,13 @@ class VertexAIEmbeddingsComponent(LCModelComponent): info="JSON credentials file. 
Leave empty to fallback to environment variables", value="", file_types=["json"], + required=True, ), MessageTextInput(name="location", display_name="Location", value="us-central1", advanced=True), MessageTextInput(name="project", display_name="Project", info="The project ID.", advanced=True), IntInput(name="max_output_tokens", display_name="Max Output Tokens", advanced=True), IntInput(name="max_retries", display_name="Max Retries", value=1, advanced=True), - MessageTextInput(name="model_name", display_name="Model Name", value="textembedding-gecko"), + MessageTextInput(name="model_name", display_name="Model Name", value="textembedding-gecko", required=True), IntInput(name="n", display_name="N", value=1, advanced=True), IntInput(name="request_parallelism", value=5, display_name="Request Parallelism", advanced=True), MessageTextInput(name="stop_sequences", display_name="Stop", advanced=True, is_list=True), diff --git a/src/backend/base/langflow/components/helpers/structured_output.py b/src/backend/base/langflow/components/helpers/structured_output.py index 95f0da21e8d4..2db37a605768 100644 --- a/src/backend/base/langflow/components/helpers/structured_output.py +++ b/src/backend/base/langflow/components/helpers/structured_output.py @@ -34,6 +34,7 @@ class StructuredOutputComponent(Component): display_name="Input Message", info="The input message to the language model.", tool_mode=True, + required=True, ), StrInput( name="schema_name", diff --git a/src/backend/base/langflow/components/icosacomputing/combinatorial_reasoner.py b/src/backend/base/langflow/components/icosacomputing/combinatorial_reasoner.py index 217bfb318358..22b2e782d507 100644 --- a/src/backend/base/langflow/components/icosacomputing/combinatorial_reasoner.py +++ b/src/backend/base/langflow/components/icosacomputing/combinatorial_reasoner.py @@ -16,25 +16,28 @@ class CombinatorialReasonerComponent(Component): name = "Combinatorial Reasoner" inputs = [ - MessageTextInput(name="prompt", display_name="Prompt"), + MessageTextInput(name="prompt", display_name="Prompt", required=True), SecretStrInput( name="openai_api_key", display_name="OpenAI API Key", info="The OpenAI API Key to use for the OpenAI model.", advanced=False, value="OPENAI_API_KEY", + required=True, ), StrInput( name="username", display_name="Username", info="Username to authenticate access to Icosa CR API", advanced=False, + required=True, ), SecretStrInput( name="password", display_name="Password", info="Password to authenticate access to Icosa CR API.", advanced=False, + required=True, ), DropdownInput( name="model_name", diff --git a/src/backend/base/langflow/components/langchain_utilities/character.py b/src/backend/base/langflow/components/langchain_utilities/character.py index 0dff4f13fe3b..92dacd519e36 100644 --- a/src/backend/base/langflow/components/langchain_utilities/character.py +++ b/src/backend/base/langflow/components/langchain_utilities/character.py @@ -32,6 +32,7 @@ class CharacterTextSplitterComponent(LCTextSplitterComponent): display_name="Input", info="The texts to split.", input_types=["Document", "Data"], + required=True, ), MessageTextInput( name="separator", diff --git a/src/backend/base/langflow/components/langchain_utilities/csv_agent.py b/src/backend/base/langflow/components/langchain_utilities/csv_agent.py index 3e1e322410d6..ecfc3a415509 100644 --- a/src/backend/base/langflow/components/langchain_utilities/csv_agent.py +++ b/src/backend/base/langflow/components/langchain_utilities/csv_agent.py @@ -43,6 +43,7 @@ class 
CSVAgentComponent(LCAgentComponent): name="input_value", display_name="Text", info="Text to be passed as input and extract info from the CSV File.", + required=True, ), DictInput( name="pandas_kwargs", diff --git a/src/backend/base/langflow/components/langchain_utilities/html_link_extractor.py b/src/backend/base/langflow/components/langchain_utilities/html_link_extractor.py index 824b04ea16fb..0d3e654abb05 100644 --- a/src/backend/base/langflow/components/langchain_utilities/html_link_extractor.py +++ b/src/backend/base/langflow/components/langchain_utilities/html_link_extractor.py @@ -22,6 +22,7 @@ class HtmlLinkExtractorComponent(LCDocumentTransformerComponent): display_name="Input", info="The texts from which to extract links.", input_types=["Document", "Data"], + required=True, ), ] diff --git a/src/backend/base/langflow/components/langchain_utilities/language_recursive.py b/src/backend/base/langflow/components/langchain_utilities/language_recursive.py index 0a454cf43b81..66f909e96991 100644 --- a/src/backend/base/langflow/components/langchain_utilities/language_recursive.py +++ b/src/backend/base/langflow/components/langchain_utilities/language_recursive.py @@ -31,6 +31,7 @@ class LanguageRecursiveTextSplitterComponent(LCTextSplitterComponent): display_name="Input", info="The texts to split.", input_types=["Document", "Data"], + required=True, ), DropdownInput( name="code_language", display_name="Code Language", options=[x.value for x in Language], value="python" diff --git a/src/backend/base/langflow/components/langchain_utilities/language_semantic.py b/src/backend/base/langflow/components/langchain_utilities/language_semantic.py index 261e6e294b56..6edad7d1c24e 100644 --- a/src/backend/base/langflow/components/langchain_utilities/language_semantic.py +++ b/src/backend/base/langflow/components/langchain_utilities/language_semantic.py @@ -30,6 +30,7 @@ class SemanticTextSplitterComponent(LCTextSplitterComponent): info="List of Data objects containing text and metadata to split.", input_types=["Data"], is_list=True, + required=True, ), HandleInput( name="embeddings", @@ -37,6 +38,7 @@ class SemanticTextSplitterComponent(LCTextSplitterComponent): info="Embeddings model to use for semantic similarity. 
Required.", input_types=["Embeddings"], is_list=False, + required=True, ), DropdownInput( name="breakpoint_threshold_type", diff --git a/src/backend/base/langflow/components/langchain_utilities/natural_language.py b/src/backend/base/langflow/components/langchain_utilities/natural_language.py index 3a3b3a93874e..f6e558d9cce3 100644 --- a/src/backend/base/langflow/components/langchain_utilities/natural_language.py +++ b/src/backend/base/langflow/components/langchain_utilities/natural_language.py @@ -33,6 +33,7 @@ class NaturalLanguageTextSplitterComponent(LCTextSplitterComponent): display_name="Input", info="The text data to be split.", input_types=["Document", "Data"], + required=True, ), MessageTextInput( name="separator", diff --git a/src/backend/base/langflow/components/langchain_utilities/recursive_character.py b/src/backend/base/langflow/components/langchain_utilities/recursive_character.py index 425f4d9a07c4..86d728875a0f 100644 --- a/src/backend/base/langflow/components/langchain_utilities/recursive_character.py +++ b/src/backend/base/langflow/components/langchain_utilities/recursive_character.py @@ -32,6 +32,7 @@ class RecursiveCharacterTextSplitterComponent(LCTextSplitterComponent): display_name="Input", info="The texts to split.", input_types=["Document", "Data"], + required=True, ), MessageTextInput( name="separators", diff --git a/src/backend/base/langflow/components/logic/conditional_router.py b/src/backend/base/langflow/components/logic/conditional_router.py index 0e6d419e1adc..a695f43b44d4 100644 --- a/src/backend/base/langflow/components/logic/conditional_router.py +++ b/src/backend/base/langflow/components/logic/conditional_router.py @@ -20,11 +20,13 @@ def __init__(self, *args, **kwargs): name="input_text", display_name="Text Input", info="The primary text input for the operation.", + required=True, ), MessageTextInput( name="match_text", display_name="Match Text", info="The text input to compare against.", + required=True, ), DropdownInput( name="operator", diff --git a/src/backend/base/langflow/components/logic/pass_message.py b/src/backend/base/langflow/components/logic/pass_message.py index ae527976c1cf..9db6d80e536d 100644 --- a/src/backend/base/langflow/components/logic/pass_message.py +++ b/src/backend/base/langflow/components/logic/pass_message.py @@ -15,6 +15,7 @@ class PassMessageComponent(Component): name="input_message", display_name="Input Message", info="The message to be passed forward.", + required=True, ), MessageInput( name="ignored_message", diff --git a/src/backend/base/langflow/components/notdiamond/notdiamond.py b/src/backend/base/langflow/components/notdiamond/notdiamond.py index 70a13212bd6f..7f6b322a2061 100644 --- a/src/backend/base/langflow/components/notdiamond/notdiamond.py +++ b/src/backend/base/langflow/components/notdiamond/notdiamond.py @@ -54,7 +54,7 @@ def __init__(self, *args, **kwargs): self._selected_model_name = None inputs = [ - MessageInput(name="input_value", display_name="Input"), + MessageInput(name="input_value", display_name="Input", required=True), MessageTextInput( name="system_message", display_name="System Message", @@ -75,6 +75,7 @@ def __init__(self, *args, **kwargs): info="The Not Diamond API Key to use for routing.", advanced=False, value="NOTDIAMOND_API_KEY", + required=True, ), StrInput( name="preference_id", diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Graph Vector Store RAG.json b/src/backend/base/langflow/initial_setup/starter_projects/Graph Vector Store RAG.json index 
fd1a8c8d3749..1ade22b25d29 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Graph Vector Store RAG.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Graph Vector Store RAG.json @@ -665,7 +665,9 @@ "display_name": "Embeddings", "method": "build_embeddings", "name": "embeddings", - "required_inputs": [], + "required_inputs": [ + "openai_api_key" + ], "selected": "Embeddings", "types": [ "Embeddings" @@ -731,7 +733,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from langchain_openai import OpenAIEmbeddings\n\nfrom langflow.base.embeddings.model import LCEmbeddingsModel\nfrom langflow.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom langflow.field_typing import Embeddings\nfrom langflow.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, MessageTextInput, SecretStrInput\n\n\nclass OpenAIEmbeddingsComponent(LCEmbeddingsModel):\n display_name = \"OpenAI Embeddings\"\n description = \"Generate embeddings using OpenAI models.\"\n icon = \"OpenAI\"\n name = \"OpenAIEmbeddings\"\n\n inputs = [\n DictInput(\n name=\"default_headers\",\n display_name=\"Default Headers\",\n advanced=True,\n info=\"Default headers to use for the API request.\",\n ),\n DictInput(\n name=\"default_query\",\n display_name=\"Default Query\",\n advanced=True,\n info=\"Default query parameters to use for the API request.\",\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n MessageTextInput(name=\"client\", display_name=\"Client\", advanced=True),\n MessageTextInput(name=\"deployment\", display_name=\"Deployment\", advanced=True),\n IntInput(name=\"embedding_ctx_length\", display_name=\"Embedding Context Length\", advanced=True, value=1536),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", value=3, advanced=True),\n DropdownInput(\n name=\"model\",\n display_name=\"Model\",\n advanced=False,\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=\"text-embedding-3-small\",\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n SecretStrInput(name=\"openai_api_key\", display_name=\"OpenAI API Key\", value=\"OPENAI_API_KEY\"),\n MessageTextInput(name=\"openai_api_base\", display_name=\"OpenAI API Base\", advanced=True),\n MessageTextInput(name=\"openai_api_type\", display_name=\"OpenAI API Type\", advanced=True),\n MessageTextInput(name=\"openai_api_version\", display_name=\"OpenAI API Version\", advanced=True),\n MessageTextInput(\n name=\"openai_organization\",\n display_name=\"OpenAI Organization\",\n advanced=True,\n ),\n MessageTextInput(name=\"openai_proxy\", display_name=\"OpenAI Proxy\", advanced=True),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n BoolInput(name=\"skip_empty\", display_name=\"Skip Empty\", advanced=True),\n MessageTextInput(\n name=\"tiktoken_model_name\",\n display_name=\"TikToken Model Name\",\n advanced=True,\n ),\n BoolInput(\n name=\"tiktoken_enable\",\n display_name=\"TikToken Enable\",\n advanced=True,\n value=True,\n info=\"If False, you must have transformers installed.\",\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n ]\n\n def build_embeddings(self) -> Embeddings:\n return OpenAIEmbeddings(\n client=self.client or None,\n model=self.model,\n dimensions=self.dimensions or None,\n deployment=self.deployment or None,\n api_version=self.openai_api_version or None,\n base_url=self.openai_api_base or None,\n openai_api_type=self.openai_api_type or None,\n openai_proxy=self.openai_proxy or None,\n embedding_ctx_length=self.embedding_ctx_length,\n api_key=self.openai_api_key or None,\n organization=self.openai_organization or None,\n allowed_special=\"all\",\n disallowed_special=\"all\",\n chunk_size=self.chunk_size,\n max_retries=self.max_retries,\n timeout=self.request_timeout or None,\n tiktoken_enabled=self.tiktoken_enable,\n tiktoken_model_name=self.tiktoken_model_name or None,\n show_progress_bar=self.show_progress_bar,\n model_kwargs=self.model_kwargs,\n skip_empty=self.skip_empty,\n default_headers=self.default_headers or None,\n default_query=self.default_query or None,\n )\n" + "value": "from langchain_openai import OpenAIEmbeddings\n\nfrom langflow.base.embeddings.model import LCEmbeddingsModel\nfrom langflow.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom langflow.field_typing import Embeddings\nfrom langflow.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, MessageTextInput, SecretStrInput\n\n\nclass OpenAIEmbeddingsComponent(LCEmbeddingsModel):\n display_name = \"OpenAI Embeddings\"\n description = \"Generate embeddings using OpenAI models.\"\n icon = \"OpenAI\"\n name = \"OpenAIEmbeddings\"\n\n inputs = [\n DictInput(\n name=\"default_headers\",\n display_name=\"Default Headers\",\n advanced=True,\n info=\"Default headers to use for the API request.\",\n ),\n DictInput(\n name=\"default_query\",\n display_name=\"Default Query\",\n advanced=True,\n info=\"Default query parameters to use for the API request.\",\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n MessageTextInput(name=\"client\", display_name=\"Client\", advanced=True),\n MessageTextInput(name=\"deployment\", display_name=\"Deployment\", advanced=True),\n IntInput(name=\"embedding_ctx_length\", display_name=\"Embedding Context Length\", advanced=True, value=1536),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", value=3, advanced=True),\n DropdownInput(\n name=\"model\",\n display_name=\"Model\",\n advanced=False,\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=\"text-embedding-3-small\",\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n SecretStrInput(name=\"openai_api_key\", display_name=\"OpenAI API Key\", value=\"OPENAI_API_KEY\", required=True),\n MessageTextInput(name=\"openai_api_base\", display_name=\"OpenAI API Base\", advanced=True),\n MessageTextInput(name=\"openai_api_type\", display_name=\"OpenAI API Type\", advanced=True),\n MessageTextInput(name=\"openai_api_version\", display_name=\"OpenAI API Version\", advanced=True),\n MessageTextInput(\n name=\"openai_organization\",\n display_name=\"OpenAI Organization\",\n advanced=True,\n ),\n MessageTextInput(name=\"openai_proxy\", display_name=\"OpenAI Proxy\", advanced=True),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n BoolInput(name=\"skip_empty\", display_name=\"Skip Empty\", advanced=True),\n MessageTextInput(\n 
name=\"tiktoken_model_name\",\n display_name=\"TikToken Model Name\",\n advanced=True,\n ),\n BoolInput(\n name=\"tiktoken_enable\",\n display_name=\"TikToken Enable\",\n advanced=True,\n value=True,\n info=\"If False, you must have transformers installed.\",\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n ]\n\n def build_embeddings(self) -> Embeddings:\n return OpenAIEmbeddings(\n client=self.client or None,\n model=self.model,\n dimensions=self.dimensions or None,\n deployment=self.deployment or None,\n api_version=self.openai_api_version or None,\n base_url=self.openai_api_base or None,\n openai_api_type=self.openai_api_type or None,\n openai_proxy=self.openai_proxy or None,\n embedding_ctx_length=self.embedding_ctx_length,\n api_key=self.openai_api_key or None,\n organization=self.openai_organization or None,\n allowed_special=\"all\",\n disallowed_special=\"all\",\n chunk_size=self.chunk_size,\n max_retries=self.max_retries,\n timeout=self.request_timeout or None,\n tiktoken_enabled=self.tiktoken_enable,\n tiktoken_model_name=self.tiktoken_model_name or None,\n show_progress_bar=self.show_progress_bar,\n model_kwargs=self.model_kwargs,\n skip_empty=self.skip_empty,\n default_headers=self.default_headers or None,\n default_query=self.default_query or None,\n )\n" }, "default_headers": { "_input_type": "DictInput", @@ -908,7 +910,7 @@ "name": "openai_api_key", "password": true, "placeholder": "", - "required": false, + "required": true, "show": true, "title_case": false, "type": "str", @@ -3344,7 +3346,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nfrom langchain_text_splitters import Language, RecursiveCharacterTextSplitter, TextSplitter\n\nfrom langflow.base.textsplitters.model import LCTextSplitterComponent\nfrom langflow.inputs import DataInput, DropdownInput, IntInput\n\n\nclass LanguageRecursiveTextSplitterComponent(LCTextSplitterComponent):\n display_name: str = \"Language Recursive Text Splitter\"\n description: str = \"Split text into chunks of a specified length based on language.\"\n documentation: str = \"https://docs.langflow.org/components/text-splitters#languagerecursivetextsplitter\"\n name = \"LanguageRecursiveTextSplitter\"\n icon = \"LangChain\"\n\n inputs = [\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n info=\"The maximum length of each chunk.\",\n value=1000,\n ),\n IntInput(\n name=\"chunk_overlap\",\n display_name=\"Chunk Overlap\",\n info=\"The amount of overlap between chunks.\",\n value=200,\n ),\n DataInput(\n name=\"data_input\",\n display_name=\"Input\",\n info=\"The texts to split.\",\n input_types=[\"Document\", \"Data\"],\n ),\n DropdownInput(\n name=\"code_language\", display_name=\"Code Language\", options=[x.value for x in Language], value=\"python\"\n ),\n ]\n\n def get_data_input(self) -> Any:\n return self.data_input\n\n def build_text_splitter(self) -> TextSplitter:\n return RecursiveCharacterTextSplitter.from_language(\n language=Language(self.code_language),\n chunk_size=self.chunk_size,\n chunk_overlap=self.chunk_overlap,\n )\n" + "value": "from typing import Any\n\nfrom langchain_text_splitters import Language, RecursiveCharacterTextSplitter, TextSplitter\n\nfrom langflow.base.textsplitters.model import LCTextSplitterComponent\nfrom langflow.inputs import DataInput, DropdownInput, IntInput\n\n\nclass 
LanguageRecursiveTextSplitterComponent(LCTextSplitterComponent):\n display_name: str = \"Language Recursive Text Splitter\"\n description: str = \"Split text into chunks of a specified length based on language.\"\n documentation: str = \"https://docs.langflow.org/components/text-splitters#languagerecursivetextsplitter\"\n name = \"LanguageRecursiveTextSplitter\"\n icon = \"LangChain\"\n\n inputs = [\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n info=\"The maximum length of each chunk.\",\n value=1000,\n ),\n IntInput(\n name=\"chunk_overlap\",\n display_name=\"Chunk Overlap\",\n info=\"The amount of overlap between chunks.\",\n value=200,\n ),\n DataInput(\n name=\"data_input\",\n display_name=\"Input\",\n info=\"The texts to split.\",\n input_types=[\"Document\", \"Data\"],\n required=True,\n ),\n DropdownInput(\n name=\"code_language\", display_name=\"Code Language\", options=[x.value for x in Language], value=\"python\"\n ),\n ]\n\n def get_data_input(self) -> Any:\n return self.data_input\n\n def build_text_splitter(self) -> TextSplitter:\n return RecursiveCharacterTextSplitter.from_language(\n language=Language(self.code_language),\n chunk_size=self.chunk_size,\n chunk_overlap=self.chunk_overlap,\n )\n" }, "code_language": { "_input_type": "DropdownInput", @@ -3404,7 +3406,7 @@ "list": false, "name": "data_input", "placeholder": "", - "required": false, + "required": true, "show": true, "title_case": false, "tool_mode": false, @@ -3494,7 +3496,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nfrom langchain_community.graph_vectorstores.extractors import HtmlLinkExtractor, LinkExtractorTransformer\nfrom langchain_core.documents import BaseDocumentTransformer\n\nfrom langflow.base.document_transformers.model import LCDocumentTransformerComponent\nfrom langflow.inputs import BoolInput, DataInput, StrInput\n\n\nclass HtmlLinkExtractorComponent(LCDocumentTransformerComponent):\n display_name = \"HTML Link Extractor\"\n description = \"Extract hyperlinks from HTML content.\"\n documentation = \"https://python.langchain.com/v0.2/api_reference/community/graph_vectorstores/langchain_community.graph_vectorstores.extractors.html_link_extractor.HtmlLinkExtractor.html\"\n name = \"HtmlLinkExtractor\"\n icon = \"LangChain\"\n\n inputs = [\n StrInput(name=\"kind\", display_name=\"Kind of edge\", value=\"hyperlink\", required=False),\n BoolInput(name=\"drop_fragments\", display_name=\"Drop URL fragments\", value=True, required=False),\n DataInput(\n name=\"data_input\",\n display_name=\"Input\",\n info=\"The texts from which to extract links.\",\n input_types=[\"Document\", \"Data\"],\n ),\n ]\n\n def get_data_input(self) -> Any:\n return self.data_input\n\n def build_document_transformer(self) -> BaseDocumentTransformer:\n return LinkExtractorTransformer(\n [HtmlLinkExtractor(kind=self.kind, drop_fragments=self.drop_fragments).as_document_extractor()]\n )\n" + "value": "from typing import Any\n\nfrom langchain_community.graph_vectorstores.extractors import HtmlLinkExtractor, LinkExtractorTransformer\nfrom langchain_core.documents import BaseDocumentTransformer\n\nfrom langflow.base.document_transformers.model import LCDocumentTransformerComponent\nfrom langflow.inputs import BoolInput, DataInput, StrInput\n\n\nclass HtmlLinkExtractorComponent(LCDocumentTransformerComponent):\n display_name = \"HTML Link Extractor\"\n description = \"Extract hyperlinks from HTML content.\"\n documentation = 
\"https://python.langchain.com/v0.2/api_reference/community/graph_vectorstores/langchain_community.graph_vectorstores.extractors.html_link_extractor.HtmlLinkExtractor.html\"\n name = \"HtmlLinkExtractor\"\n icon = \"LangChain\"\n\n inputs = [\n StrInput(name=\"kind\", display_name=\"Kind of edge\", value=\"hyperlink\", required=False),\n BoolInput(name=\"drop_fragments\", display_name=\"Drop URL fragments\", value=True, required=False),\n DataInput(\n name=\"data_input\",\n display_name=\"Input\",\n info=\"The texts from which to extract links.\",\n input_types=[\"Document\", \"Data\"],\n required=True,\n ),\n ]\n\n def get_data_input(self) -> Any:\n return self.data_input\n\n def build_document_transformer(self) -> BaseDocumentTransformer:\n return LinkExtractorTransformer(\n [HtmlLinkExtractor(kind=self.kind, drop_fragments=self.drop_fragments).as_document_extractor()]\n )\n" }, "data_input": { "_input_type": "DataInput", @@ -3509,7 +3511,7 @@ "list": false, "name": "data_input", "placeholder": "", - "required": false, + "required": true, "show": true, "title_case": false, "tool_mode": false, @@ -3624,7 +3626,9 @@ "display_name": "Embeddings", "method": "build_embeddings", "name": "embeddings", - "required_inputs": [], + "required_inputs": [ + "openai_api_key" + ], "selected": "Embeddings", "types": [ "Embeddings" @@ -3691,7 +3695,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from langchain_openai import OpenAIEmbeddings\n\nfrom langflow.base.embeddings.model import LCEmbeddingsModel\nfrom langflow.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom langflow.field_typing import Embeddings\nfrom langflow.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, MessageTextInput, SecretStrInput\n\n\nclass OpenAIEmbeddingsComponent(LCEmbeddingsModel):\n display_name = \"OpenAI Embeddings\"\n description = \"Generate embeddings using OpenAI models.\"\n icon = \"OpenAI\"\n name = \"OpenAIEmbeddings\"\n\n inputs = [\n DictInput(\n name=\"default_headers\",\n display_name=\"Default Headers\",\n advanced=True,\n info=\"Default headers to use for the API request.\",\n ),\n DictInput(\n name=\"default_query\",\n display_name=\"Default Query\",\n advanced=True,\n info=\"Default query parameters to use for the API request.\",\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n MessageTextInput(name=\"client\", display_name=\"Client\", advanced=True),\n MessageTextInput(name=\"deployment\", display_name=\"Deployment\", advanced=True),\n IntInput(name=\"embedding_ctx_length\", display_name=\"Embedding Context Length\", advanced=True, value=1536),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", value=3, advanced=True),\n DropdownInput(\n name=\"model\",\n display_name=\"Model\",\n advanced=False,\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=\"text-embedding-3-small\",\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n SecretStrInput(name=\"openai_api_key\", display_name=\"OpenAI API Key\", value=\"OPENAI_API_KEY\"),\n MessageTextInput(name=\"openai_api_base\", display_name=\"OpenAI API Base\", advanced=True),\n MessageTextInput(name=\"openai_api_type\", display_name=\"OpenAI API Type\", advanced=True),\n MessageTextInput(name=\"openai_api_version\", display_name=\"OpenAI API Version\", advanced=True),\n MessageTextInput(\n name=\"openai_organization\",\n display_name=\"OpenAI Organization\",\n advanced=True,\n ),\n 
MessageTextInput(name=\"openai_proxy\", display_name=\"OpenAI Proxy\", advanced=True),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n BoolInput(name=\"skip_empty\", display_name=\"Skip Empty\", advanced=True),\n MessageTextInput(\n name=\"tiktoken_model_name\",\n display_name=\"TikToken Model Name\",\n advanced=True,\n ),\n BoolInput(\n name=\"tiktoken_enable\",\n display_name=\"TikToken Enable\",\n advanced=True,\n value=True,\n info=\"If False, you must have transformers installed.\",\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n ]\n\n def build_embeddings(self) -> Embeddings:\n return OpenAIEmbeddings(\n client=self.client or None,\n model=self.model,\n dimensions=self.dimensions or None,\n deployment=self.deployment or None,\n api_version=self.openai_api_version or None,\n base_url=self.openai_api_base or None,\n openai_api_type=self.openai_api_type or None,\n openai_proxy=self.openai_proxy or None,\n embedding_ctx_length=self.embedding_ctx_length,\n api_key=self.openai_api_key or None,\n organization=self.openai_organization or None,\n allowed_special=\"all\",\n disallowed_special=\"all\",\n chunk_size=self.chunk_size,\n max_retries=self.max_retries,\n timeout=self.request_timeout or None,\n tiktoken_enabled=self.tiktoken_enable,\n tiktoken_model_name=self.tiktoken_model_name or None,\n show_progress_bar=self.show_progress_bar,\n model_kwargs=self.model_kwargs,\n skip_empty=self.skip_empty,\n default_headers=self.default_headers or None,\n default_query=self.default_query or None,\n )\n" + "value": "from langchain_openai import OpenAIEmbeddings\n\nfrom langflow.base.embeddings.model import LCEmbeddingsModel\nfrom langflow.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom langflow.field_typing import Embeddings\nfrom langflow.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, MessageTextInput, SecretStrInput\n\n\nclass OpenAIEmbeddingsComponent(LCEmbeddingsModel):\n display_name = \"OpenAI Embeddings\"\n description = \"Generate embeddings using OpenAI models.\"\n icon = \"OpenAI\"\n name = \"OpenAIEmbeddings\"\n\n inputs = [\n DictInput(\n name=\"default_headers\",\n display_name=\"Default Headers\",\n advanced=True,\n info=\"Default headers to use for the API request.\",\n ),\n DictInput(\n name=\"default_query\",\n display_name=\"Default Query\",\n advanced=True,\n info=\"Default query parameters to use for the API request.\",\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n MessageTextInput(name=\"client\", display_name=\"Client\", advanced=True),\n MessageTextInput(name=\"deployment\", display_name=\"Deployment\", advanced=True),\n IntInput(name=\"embedding_ctx_length\", display_name=\"Embedding Context Length\", advanced=True, value=1536),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", value=3, advanced=True),\n DropdownInput(\n name=\"model\",\n display_name=\"Model\",\n advanced=False,\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=\"text-embedding-3-small\",\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n SecretStrInput(name=\"openai_api_key\", display_name=\"OpenAI API Key\", value=\"OPENAI_API_KEY\", required=True),\n 
MessageTextInput(name=\"openai_api_base\", display_name=\"OpenAI API Base\", advanced=True),\n MessageTextInput(name=\"openai_api_type\", display_name=\"OpenAI API Type\", advanced=True),\n MessageTextInput(name=\"openai_api_version\", display_name=\"OpenAI API Version\", advanced=True),\n MessageTextInput(\n name=\"openai_organization\",\n display_name=\"OpenAI Organization\",\n advanced=True,\n ),\n MessageTextInput(name=\"openai_proxy\", display_name=\"OpenAI Proxy\", advanced=True),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n BoolInput(name=\"skip_empty\", display_name=\"Skip Empty\", advanced=True),\n MessageTextInput(\n name=\"tiktoken_model_name\",\n display_name=\"TikToken Model Name\",\n advanced=True,\n ),\n BoolInput(\n name=\"tiktoken_enable\",\n display_name=\"TikToken Enable\",\n advanced=True,\n value=True,\n info=\"If False, you must have transformers installed.\",\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n ]\n\n def build_embeddings(self) -> Embeddings:\n return OpenAIEmbeddings(\n client=self.client or None,\n model=self.model,\n dimensions=self.dimensions or None,\n deployment=self.deployment or None,\n api_version=self.openai_api_version or None,\n base_url=self.openai_api_base or None,\n openai_api_type=self.openai_api_type or None,\n openai_proxy=self.openai_proxy or None,\n embedding_ctx_length=self.embedding_ctx_length,\n api_key=self.openai_api_key or None,\n organization=self.openai_organization or None,\n allowed_special=\"all\",\n disallowed_special=\"all\",\n chunk_size=self.chunk_size,\n max_retries=self.max_retries,\n timeout=self.request_timeout or None,\n tiktoken_enabled=self.tiktoken_enable,\n tiktoken_model_name=self.tiktoken_model_name or None,\n show_progress_bar=self.show_progress_bar,\n model_kwargs=self.model_kwargs,\n skip_empty=self.skip_empty,\n default_headers=self.default_headers or None,\n default_query=self.default_query or None,\n )\n" }, "default_headers": { "_input_type": "DictInput", @@ -3874,7 +3878,7 @@ "name": "openai_api_key", "password": true, "placeholder": "", - "required": false, + "required": true, "show": true, "title_case": false, "type": "str", diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json b/src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json index cedeb886f2ce..8ce425c11072 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json @@ -1727,7 +1727,9 @@ "display_name": "Embeddings", "method": "build_embeddings", "name": "embeddings", - "required_inputs": [], + "required_inputs": [ + "openai_api_key" + ], "selected": "Embeddings", "types": [ "Embeddings" @@ -1792,7 +1794,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from langchain_openai import OpenAIEmbeddings\n\nfrom langflow.base.embeddings.model import LCEmbeddingsModel\nfrom langflow.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom langflow.field_typing import Embeddings\nfrom langflow.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, MessageTextInput, SecretStrInput\n\n\nclass 
OpenAIEmbeddingsComponent(LCEmbeddingsModel):\n display_name = \"OpenAI Embeddings\"\n description = \"Generate embeddings using OpenAI models.\"\n icon = \"OpenAI\"\n name = \"OpenAIEmbeddings\"\n\n inputs = [\n DictInput(\n name=\"default_headers\",\n display_name=\"Default Headers\",\n advanced=True,\n info=\"Default headers to use for the API request.\",\n ),\n DictInput(\n name=\"default_query\",\n display_name=\"Default Query\",\n advanced=True,\n info=\"Default query parameters to use for the API request.\",\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n MessageTextInput(name=\"client\", display_name=\"Client\", advanced=True),\n MessageTextInput(name=\"deployment\", display_name=\"Deployment\", advanced=True),\n IntInput(name=\"embedding_ctx_length\", display_name=\"Embedding Context Length\", advanced=True, value=1536),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", value=3, advanced=True),\n DropdownInput(\n name=\"model\",\n display_name=\"Model\",\n advanced=False,\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=\"text-embedding-3-small\",\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n SecretStrInput(name=\"openai_api_key\", display_name=\"OpenAI API Key\", value=\"OPENAI_API_KEY\"),\n MessageTextInput(name=\"openai_api_base\", display_name=\"OpenAI API Base\", advanced=True),\n MessageTextInput(name=\"openai_api_type\", display_name=\"OpenAI API Type\", advanced=True),\n MessageTextInput(name=\"openai_api_version\", display_name=\"OpenAI API Version\", advanced=True),\n MessageTextInput(\n name=\"openai_organization\",\n display_name=\"OpenAI Organization\",\n advanced=True,\n ),\n MessageTextInput(name=\"openai_proxy\", display_name=\"OpenAI Proxy\", advanced=True),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n BoolInput(name=\"skip_empty\", display_name=\"Skip Empty\", advanced=True),\n MessageTextInput(\n name=\"tiktoken_model_name\",\n display_name=\"TikToken Model Name\",\n advanced=True,\n ),\n BoolInput(\n name=\"tiktoken_enable\",\n display_name=\"TikToken Enable\",\n advanced=True,\n value=True,\n info=\"If False, you must have transformers installed.\",\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n ]\n\n def build_embeddings(self) -> Embeddings:\n return OpenAIEmbeddings(\n client=self.client or None,\n model=self.model,\n dimensions=self.dimensions or None,\n deployment=self.deployment or None,\n api_version=self.openai_api_version or None,\n base_url=self.openai_api_base or None,\n openai_api_type=self.openai_api_type or None,\n openai_proxy=self.openai_proxy or None,\n embedding_ctx_length=self.embedding_ctx_length,\n api_key=self.openai_api_key or None,\n organization=self.openai_organization or None,\n allowed_special=\"all\",\n disallowed_special=\"all\",\n chunk_size=self.chunk_size,\n max_retries=self.max_retries,\n timeout=self.request_timeout or None,\n tiktoken_enabled=self.tiktoken_enable,\n tiktoken_model_name=self.tiktoken_model_name or None,\n show_progress_bar=self.show_progress_bar,\n model_kwargs=self.model_kwargs,\n skip_empty=self.skip_empty,\n default_headers=self.default_headers or None,\n default_query=self.default_query or None,\n )\n" + "value": "from langchain_openai import OpenAIEmbeddings\n\nfrom langflow.base.embeddings.model import LCEmbeddingsModel\nfrom langflow.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom langflow.field_typing import Embeddings\nfrom langflow.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, MessageTextInput, SecretStrInput\n\n\nclass OpenAIEmbeddingsComponent(LCEmbeddingsModel):\n display_name = \"OpenAI Embeddings\"\n description = \"Generate embeddings using OpenAI models.\"\n icon = \"OpenAI\"\n name = \"OpenAIEmbeddings\"\n\n inputs = [\n DictInput(\n name=\"default_headers\",\n display_name=\"Default Headers\",\n advanced=True,\n info=\"Default headers to use for the API request.\",\n ),\n DictInput(\n name=\"default_query\",\n display_name=\"Default Query\",\n advanced=True,\n info=\"Default query parameters to use for the API request.\",\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n MessageTextInput(name=\"client\", display_name=\"Client\", advanced=True),\n MessageTextInput(name=\"deployment\", display_name=\"Deployment\", advanced=True),\n IntInput(name=\"embedding_ctx_length\", display_name=\"Embedding Context Length\", advanced=True, value=1536),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", value=3, advanced=True),\n DropdownInput(\n name=\"model\",\n display_name=\"Model\",\n advanced=False,\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=\"text-embedding-3-small\",\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n SecretStrInput(name=\"openai_api_key\", display_name=\"OpenAI API Key\", value=\"OPENAI_API_KEY\", required=True),\n MessageTextInput(name=\"openai_api_base\", display_name=\"OpenAI API Base\", advanced=True),\n MessageTextInput(name=\"openai_api_type\", display_name=\"OpenAI API Type\", advanced=True),\n MessageTextInput(name=\"openai_api_version\", display_name=\"OpenAI API Version\", advanced=True),\n MessageTextInput(\n name=\"openai_organization\",\n display_name=\"OpenAI Organization\",\n advanced=True,\n ),\n MessageTextInput(name=\"openai_proxy\", display_name=\"OpenAI Proxy\", advanced=True),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n BoolInput(name=\"skip_empty\", display_name=\"Skip Empty\", advanced=True),\n MessageTextInput(\n 
name=\"tiktoken_model_name\",\n display_name=\"TikToken Model Name\",\n advanced=True,\n ),\n BoolInput(\n name=\"tiktoken_enable\",\n display_name=\"TikToken Enable\",\n advanced=True,\n value=True,\n info=\"If False, you must have transformers installed.\",\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n ]\n\n def build_embeddings(self) -> Embeddings:\n return OpenAIEmbeddings(\n client=self.client or None,\n model=self.model,\n dimensions=self.dimensions or None,\n deployment=self.deployment or None,\n api_version=self.openai_api_version or None,\n base_url=self.openai_api_base or None,\n openai_api_type=self.openai_api_type or None,\n openai_proxy=self.openai_proxy or None,\n embedding_ctx_length=self.embedding_ctx_length,\n api_key=self.openai_api_key or None,\n organization=self.openai_organization or None,\n allowed_special=\"all\",\n disallowed_special=\"all\",\n chunk_size=self.chunk_size,\n max_retries=self.max_retries,\n timeout=self.request_timeout or None,\n tiktoken_enabled=self.tiktoken_enable,\n tiktoken_model_name=self.tiktoken_model_name or None,\n show_progress_bar=self.show_progress_bar,\n model_kwargs=self.model_kwargs,\n skip_empty=self.skip_empty,\n default_headers=self.default_headers or None,\n default_query=self.default_query or None,\n )\n" }, "default_headers": { "_input_type": "DictInput", @@ -1969,7 +1971,7 @@ "name": "openai_api_key", "password": true, "placeholder": "", - "required": false, + "required": true, "show": true, "title_case": false, "type": "str", @@ -2250,7 +2252,9 @@ "display_name": "Embeddings", "method": "build_embeddings", "name": "embeddings", - "required_inputs": [], + "required_inputs": [ + "openai_api_key" + ], "selected": "Embeddings", "types": [ "Embeddings" @@ -2315,7 +2319,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from langchain_openai import OpenAIEmbeddings\n\nfrom langflow.base.embeddings.model import LCEmbeddingsModel\nfrom langflow.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom langflow.field_typing import Embeddings\nfrom langflow.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, MessageTextInput, SecretStrInput\n\n\nclass OpenAIEmbeddingsComponent(LCEmbeddingsModel):\n display_name = \"OpenAI Embeddings\"\n description = \"Generate embeddings using OpenAI models.\"\n icon = \"OpenAI\"\n name = \"OpenAIEmbeddings\"\n\n inputs = [\n DictInput(\n name=\"default_headers\",\n display_name=\"Default Headers\",\n advanced=True,\n info=\"Default headers to use for the API request.\",\n ),\n DictInput(\n name=\"default_query\",\n display_name=\"Default Query\",\n advanced=True,\n info=\"Default query parameters to use for the API request.\",\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n MessageTextInput(name=\"client\", display_name=\"Client\", advanced=True),\n MessageTextInput(name=\"deployment\", display_name=\"Deployment\", advanced=True),\n IntInput(name=\"embedding_ctx_length\", display_name=\"Embedding Context Length\", advanced=True, value=1536),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", value=3, advanced=True),\n DropdownInput(\n name=\"model\",\n display_name=\"Model\",\n advanced=False,\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=\"text-embedding-3-small\",\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model 
Kwargs\", advanced=True),\n SecretStrInput(name=\"openai_api_key\", display_name=\"OpenAI API Key\", value=\"OPENAI_API_KEY\"),\n MessageTextInput(name=\"openai_api_base\", display_name=\"OpenAI API Base\", advanced=True),\n MessageTextInput(name=\"openai_api_type\", display_name=\"OpenAI API Type\", advanced=True),\n MessageTextInput(name=\"openai_api_version\", display_name=\"OpenAI API Version\", advanced=True),\n MessageTextInput(\n name=\"openai_organization\",\n display_name=\"OpenAI Organization\",\n advanced=True,\n ),\n MessageTextInput(name=\"openai_proxy\", display_name=\"OpenAI Proxy\", advanced=True),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n BoolInput(name=\"skip_empty\", display_name=\"Skip Empty\", advanced=True),\n MessageTextInput(\n name=\"tiktoken_model_name\",\n display_name=\"TikToken Model Name\",\n advanced=True,\n ),\n BoolInput(\n name=\"tiktoken_enable\",\n display_name=\"TikToken Enable\",\n advanced=True,\n value=True,\n info=\"If False, you must have transformers installed.\",\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n ]\n\n def build_embeddings(self) -> Embeddings:\n return OpenAIEmbeddings(\n client=self.client or None,\n model=self.model,\n dimensions=self.dimensions or None,\n deployment=self.deployment or None,\n api_version=self.openai_api_version or None,\n base_url=self.openai_api_base or None,\n openai_api_type=self.openai_api_type or None,\n openai_proxy=self.openai_proxy or None,\n embedding_ctx_length=self.embedding_ctx_length,\n api_key=self.openai_api_key or None,\n organization=self.openai_organization or None,\n allowed_special=\"all\",\n disallowed_special=\"all\",\n chunk_size=self.chunk_size,\n max_retries=self.max_retries,\n timeout=self.request_timeout or None,\n tiktoken_enabled=self.tiktoken_enable,\n tiktoken_model_name=self.tiktoken_model_name or None,\n show_progress_bar=self.show_progress_bar,\n model_kwargs=self.model_kwargs,\n skip_empty=self.skip_empty,\n default_headers=self.default_headers or None,\n default_query=self.default_query or None,\n )\n" + "value": "from langchain_openai import OpenAIEmbeddings\n\nfrom langflow.base.embeddings.model import LCEmbeddingsModel\nfrom langflow.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom langflow.field_typing import Embeddings\nfrom langflow.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, MessageTextInput, SecretStrInput\n\n\nclass OpenAIEmbeddingsComponent(LCEmbeddingsModel):\n display_name = \"OpenAI Embeddings\"\n description = \"Generate embeddings using OpenAI models.\"\n icon = \"OpenAI\"\n name = \"OpenAIEmbeddings\"\n\n inputs = [\n DictInput(\n name=\"default_headers\",\n display_name=\"Default Headers\",\n advanced=True,\n info=\"Default headers to use for the API request.\",\n ),\n DictInput(\n name=\"default_query\",\n display_name=\"Default Query\",\n advanced=True,\n info=\"Default query parameters to use for the API request.\",\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n MessageTextInput(name=\"client\", display_name=\"Client\", advanced=True),\n MessageTextInput(name=\"deployment\", display_name=\"Deployment\", advanced=True),\n 
IntInput(name=\"embedding_ctx_length\", display_name=\"Embedding Context Length\", advanced=True, value=1536),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", value=3, advanced=True),\n DropdownInput(\n name=\"model\",\n display_name=\"Model\",\n advanced=False,\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=\"text-embedding-3-small\",\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n SecretStrInput(name=\"openai_api_key\", display_name=\"OpenAI API Key\", value=\"OPENAI_API_KEY\", required=True),\n MessageTextInput(name=\"openai_api_base\", display_name=\"OpenAI API Base\", advanced=True),\n MessageTextInput(name=\"openai_api_type\", display_name=\"OpenAI API Type\", advanced=True),\n MessageTextInput(name=\"openai_api_version\", display_name=\"OpenAI API Version\", advanced=True),\n MessageTextInput(\n name=\"openai_organization\",\n display_name=\"OpenAI Organization\",\n advanced=True,\n ),\n MessageTextInput(name=\"openai_proxy\", display_name=\"OpenAI Proxy\", advanced=True),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n BoolInput(name=\"skip_empty\", display_name=\"Skip Empty\", advanced=True),\n MessageTextInput(\n name=\"tiktoken_model_name\",\n display_name=\"TikToken Model Name\",\n advanced=True,\n ),\n BoolInput(\n name=\"tiktoken_enable\",\n display_name=\"TikToken Enable\",\n advanced=True,\n value=True,\n info=\"If False, you must have transformers installed.\",\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n ]\n\n def build_embeddings(self) -> Embeddings:\n return OpenAIEmbeddings(\n client=self.client or None,\n model=self.model,\n dimensions=self.dimensions or None,\n deployment=self.deployment or None,\n api_version=self.openai_api_version or None,\n base_url=self.openai_api_base or None,\n openai_api_type=self.openai_api_type or None,\n openai_proxy=self.openai_proxy or None,\n embedding_ctx_length=self.embedding_ctx_length,\n api_key=self.openai_api_key or None,\n organization=self.openai_organization or None,\n allowed_special=\"all\",\n disallowed_special=\"all\",\n chunk_size=self.chunk_size,\n max_retries=self.max_retries,\n timeout=self.request_timeout or None,\n tiktoken_enabled=self.tiktoken_enable,\n tiktoken_model_name=self.tiktoken_model_name or None,\n show_progress_bar=self.show_progress_bar,\n model_kwargs=self.model_kwargs,\n skip_empty=self.skip_empty,\n default_headers=self.default_headers or None,\n default_query=self.default_query or None,\n )\n" }, "default_headers": { "_input_type": "DictInput", @@ -2492,7 +2496,7 @@ "name": "openai_api_key", "password": true, "placeholder": "", - "required": false, + "required": true, "show": true, "title_case": false, "type": "str", From 47dc891ecce0aeb0decb374697cc3fa5eec6c964 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vin=C3=ADcios=20Batista=20da=20Silva?= Date: Thu, 16 Jan 2025 18:34:39 -0300 Subject: [PATCH 20/22] feat: make YouTube Transcripts URL field required (#5686) feat: Enhance YouTube Transcripts component by adding required field validation to URL input This change ensures that users provide a video URL before using the YouTube Transcripts component, preventing potential runtime errors due to missing video source. 
--- .../base/langflow/components/tools/youtube_transcripts.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/backend/base/langflow/components/tools/youtube_transcripts.py b/src/backend/base/langflow/components/tools/youtube_transcripts.py index a2cff988a075..19250194bd16 100644 --- a/src/backend/base/langflow/components/tools/youtube_transcripts.py +++ b/src/backend/base/langflow/components/tools/youtube_transcripts.py @@ -21,6 +21,7 @@ class YouTubeTranscriptsComponent(Component): display_name="Video URL", info="Enter the YouTube video URL to get transcripts from.", tool_mode=True, + required=True, ), DropdownInput( name="transcript_format", From a5f5f3e3e30ee1740b696e3ad1823287ba27870c Mon Sep 17 00:00:00 2001 From: Christophe Bornet Date: Thu, 16 Jan 2025 23:41:25 +0100 Subject: [PATCH 21/22] fix: Fix memory leak when creating components (#5733) Fix memory leak when creating components --- src/backend/base/langflow/utils/validate.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/backend/base/langflow/utils/validate.py b/src/backend/base/langflow/utils/validate.py index 86dec60abcbf..dee5eb61fbd7 100644 --- a/src/backend/base/langflow/utils/validate.py +++ b/src/backend/base/langflow/utils/validate.py @@ -313,7 +313,6 @@ def build_custom_class(): return exec_globals[class_name] - build_custom_class.__globals__.update(exec_globals) return build_custom_class() From 36b3289da20473aff3479bce9830f226d177603a Mon Sep 17 00:00:00 2001 From: Cristhian Zanforlin Lousa Date: Fri, 17 Jan 2025 11:17:26 -0300 Subject: [PATCH 22/22] test: Update API key requirements and test configurations for frontend tests (#5752) --- .../Custom Component Generator.spec.ts | 30 +++++++++---------- .../tests/core/unit/sliderComponent.spec.ts | 3 +- 2 files changed, 16 insertions(+), 17 deletions(-) diff --git a/src/frontend/tests/core/integrations/Custom Component Generator.spec.ts b/src/frontend/tests/core/integrations/Custom Component Generator.spec.ts index 9cc731c1e114..389790d5f125 100644 --- a/src/frontend/tests/core/integrations/Custom Component Generator.spec.ts +++ b/src/frontend/tests/core/integrations/Custom Component Generator.spec.ts @@ -1,14 +1,8 @@ import { expect, test } from "@playwright/test"; import * as dotenv from "dotenv"; import path from "path"; -import { addNewApiKeys } from "../../utils/add-new-api-keys"; -import { adjustScreenView } from "../../utils/adjust-screen-view"; import { awaitBootstrapTest } from "../../utils/await-bootstrap-test"; import { getAllResponseMessage } from "../../utils/get-all-response-message"; -import { initialGPTsetup } from "../../utils/initialGPTsetup"; -import { removeOldApiKeys } from "../../utils/remove-old-api-keys"; -import { selectGptModel } from "../../utils/select-gpt-model"; -import { updateOldComponents } from "../../utils/update-old-components"; import { waitForOpenModalWithChatInput } from "../../utils/wait-for-open-modal"; test( @@ -17,7 +11,7 @@ test( async ({ page }) => { test.skip( !process?.env?.ANTHROPIC_API_KEY, - "OPENAI_API_KEY required to run this test", + "ANTHROPIC_API_KEY required to run this test", ); if (!process.env.CI) { @@ -35,16 +29,18 @@ test( timeout: 100000, }); - await initialGPTsetup(page); + await page + .getByTestId("popover-anchor-input-api_key") + .last() + .fill(process.env.ANTHROPIC_API_KEY ?? 
""); - const apiKeyInput = page.getByTestId( - "popover-anchor-input-anthropic_api_key", - ); - const isApiKeyInputVisible = await apiKeyInput.isVisible(); + await page.waitForSelector('[data-testid="dropdown_str_model_name"]', { + timeout: 5000, + }); - if (isApiKeyInputVisible) { - await apiKeyInput.fill(process.env.ANTHROPIC_API_KEY ?? ""); - } + await page.getByTestId("dropdown_str_model_name").click(); + + await page.keyboard.press("Enter"); await page.getByTestId("button_run_chat output").click(); await page.waitForSelector("text=built successfully", { timeout: 30000 }); @@ -63,7 +59,9 @@ test( const textContents = await getAllResponseMessage(page); expect(textContents.length).toBeGreaterThan(100); - expect(await page.getByTestId("chat-code-tab").isVisible()).toBe(true); + expect(await page.getByTestId("chat-code-tab").last().isVisible()).toBe( + true, + ); expect(textContents).toContain("langflow"); }, ); diff --git a/src/frontend/tests/core/unit/sliderComponent.spec.ts b/src/frontend/tests/core/unit/sliderComponent.spec.ts index 6f8ebba69937..9f7ae00e587e 100644 --- a/src/frontend/tests/core/unit/sliderComponent.spec.ts +++ b/src/frontend/tests/core/unit/sliderComponent.spec.ts @@ -39,7 +39,8 @@ test( cleanCode = cleanCode.replace("FloatInput(", "SliderInput("); cleanCode = cleanCode.replace( "from langflow.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, StrInput", - "from langflow.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, StrInput, SliderInput", + "from langflow.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, StrInput, SliderInput\n" + + "from langflow.field_typing.range_spec import RangeSpec", ); cleanCode = cleanCode.replace(