From c1a972c533e367e546bf99ef359050d5a1e73586 Mon Sep 17 00:00:00 2001 From: Rodrigo Nader Date: Thu, 13 Mar 2025 14:51:34 -0300 Subject: [PATCH] Fix: Text split issues related to separator (#6993) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fixes text split issues related to separator * [autofix.ci] apply automated fixes * [autofix.ci] apply automated fixes (attempt 2/3) * format error fix * Update Vector Store RAG.json * [autofix.ci] apply automated fixes * 📝 (freeze.spec.ts): update test description to match the actual element being tested for better clarity and maintainability * ✅ (stop-building.spec.ts): update test description to improve clarity and maintainability ✅ (stop-button-playground.spec.ts): add wait time before filling search input to ensure proper loading and interaction with the element --------- Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> Co-authored-by: Edwin Jose Co-authored-by: Gabriel Luiz Freitas Almeida Co-authored-by: cristhianzl Co-authored-by: Ítalo Johnny --- .../components/processing/split_text.py | 51 +++++++++++++++---- .../starter_projects/Vector Store RAG.json | 35 +++++++++++-- .../tests/core/features/freeze.spec.ts | 2 +- .../tests/core/features/stop-building.spec.ts | 2 +- .../features/stop-button-playground.spec.ts | 2 + 5 files changed, 75 insertions(+), 17 deletions(-) diff --git a/src/backend/base/langflow/components/processing/split_text.py b/src/backend/base/langflow/components/processing/split_text.py index e6118f118..50e805a90 100644 --- a/src/backend/base/langflow/components/processing/split_text.py +++ b/src/backend/base/langflow/components/processing/split_text.py @@ -1,7 +1,7 @@ from langchain_text_splitters import CharacterTextSplitter from langflow.custom import Component -from langflow.io import HandleInput, IntInput, MessageTextInput, Output +from langflow.io import DropdownInput, HandleInput, IntInput, 
MessageTextInput, Output from langflow.schema import Data, DataFrame from langflow.utils.util import unescape_string @@ -15,8 +15,8 @@ class SplitTextComponent(Component): inputs = [ HandleInput( name="data_inputs", - display_name="Input Documents", - info="The data to split.", + display_name="Data or DataFrame", + info="The data with texts to split in chunks.", input_types=["Data", "DataFrame"], required=True, ), @@ -29,13 +29,20 @@ class SplitTextComponent(Component): IntInput( name="chunk_size", display_name="Chunk Size", - info="The maximum number of characters in each chunk.", + info=( + "The maximum length of each chunk. Text is first split by separator, " + "then chunks are merged up to this size. " + "Individual splits larger than this won't be further divided." + ), value=1000, ), MessageTextInput( name="separator", display_name="Separator", - info="The character to split on. Defaults to newline.", + info=( + "The character to split on. Use \\n for newline. " + "Examples: \\n\\n for paragraphs, \\n for lines, . 
for sentences" + ), value="\n", ), MessageTextInput( @@ -45,6 +52,14 @@ class SplitTextComponent(Component): value="text", advanced=True, ), + DropdownInput( + name="keep_separator", + display_name="Keep Separator", + info="Whether to keep the separator in the output chunks and where to place it.", + options=["False", "True", "Start", "End"], + value="False", + advanced=True, + ), ] outputs = [ @@ -55,12 +70,18 @@ class SplitTextComponent(Component): def _docs_to_data(self, docs) -> list[Data]: return [Data(text=doc.page_content, data=doc.metadata) for doc in docs] - def _docs_to_dataframe(self, docs): - data_dicts = [{self.text_key: doc.page_content, **doc.metadata} for doc in docs] - return DataFrame(data_dicts) + def _fix_separator(self, separator: str) -> str: + """Fix common separator issues and convert to proper format.""" + if separator == "/n": + return "\n" + if separator == "/t": + return "\t" + return separator def split_text_base(self): - separator = unescape_string(self.separator) + separator = self._fix_separator(self.separator) + separator = unescape_string(separator) + if isinstance(self.data_inputs, DataFrame): if not len(self.data_inputs): msg = "DataFrame is empty" @@ -91,10 +112,20 @@ class SplitTextComponent(Component): msg = f"Invalid input type in collection: {e}" raise TypeError(msg) from e try: + # Convert string 'False'/'True' to boolean + keep_sep = self.keep_separator + if isinstance(keep_sep, str): + if keep_sep.lower() == "false": + keep_sep = False + elif keep_sep.lower() == "true": + keep_sep = True + # 'start' and 'end' are kept as strings + splitter = CharacterTextSplitter( chunk_overlap=self.chunk_overlap, chunk_size=self.chunk_size, separator=separator, + keep_separator=keep_sep, ) return splitter.split_documents(documents) except Exception as e: @@ -105,4 +136,4 @@ class SplitTextComponent(Component): return self._docs_to_data(self.split_text_base()) def as_dataframe(self) -> DataFrame: - return 
self._docs_to_dataframe(self.split_text_base()) + return DataFrame(self.split_text()) diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json b/src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json index 8372502b8..5c25f6bae 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json @@ -973,7 +973,7 @@ "advanced": false, "display_name": "Chunk Size", "dynamic": false, - "info": "The maximum number of characters in each chunk.", + "info": "The maximum length of each chunk. Text is first split by separator, then chunks are merged up to this size. Individual splits larger than this won't be further divided.", "list": false, "name": "chunk_size", "placeholder": "", @@ -1000,13 +1000,13 @@ "show": true, "title_case": false, "type": "code", - "value": "from langchain_text_splitters import CharacterTextSplitter\n\nfrom langflow.custom import Component\nfrom langflow.io import HandleInput, IntInput, MessageTextInput, Output\nfrom langflow.schema import Data, DataFrame\nfrom langflow.utils.util import unescape_string\n\n\nclass SplitTextComponent(Component):\n display_name: str = \"Split Text\"\n description: str = \"Split text into chunks based on specified criteria.\"\n icon = \"scissors-line-dashed\"\n name = \"SplitText\"\n\n inputs = [\n HandleInput(\n name=\"data_inputs\",\n display_name=\"Input Documents\",\n info=\"The data to split.\",\n input_types=[\"Data\", \"DataFrame\"],\n required=True,\n ),\n IntInput(\n name=\"chunk_overlap\",\n display_name=\"Chunk Overlap\",\n info=\"Number of characters to overlap between chunks.\",\n value=200,\n ),\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n info=\"The maximum number of characters in each chunk.\",\n value=1000,\n ),\n MessageTextInput(\n name=\"separator\",\n display_name=\"Separator\",\n info=\"The character to split 
on. Defaults to newline.\",\n value=\"\\n\",\n ),\n MessageTextInput(\n name=\"text_key\",\n display_name=\"Text Key\",\n info=\"The key to use for the text column.\",\n value=\"text\",\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Chunks\", name=\"chunks\", method=\"split_text\"),\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"as_dataframe\"),\n ]\n\n def _docs_to_data(self, docs) -> list[Data]:\n return [Data(text=doc.page_content, data=doc.metadata) for doc in docs]\n\n def _docs_to_dataframe(self, docs):\n data_dicts = [{self.text_key: doc.page_content, **doc.metadata} for doc in docs]\n return DataFrame(data_dicts)\n\n def split_text_base(self):\n separator = unescape_string(self.separator)\n if isinstance(self.data_inputs, DataFrame):\n if not len(self.data_inputs):\n msg = \"DataFrame is empty\"\n raise TypeError(msg)\n\n self.data_inputs.text_key = self.text_key\n try:\n documents = self.data_inputs.to_lc_documents()\n except Exception as e:\n msg = f\"Error converting DataFrame to documents: {e}\"\n raise TypeError(msg) from e\n else:\n if not self.data_inputs:\n msg = \"No data inputs provided\"\n raise TypeError(msg)\n\n documents = []\n if isinstance(self.data_inputs, Data):\n self.data_inputs.text_key = self.text_key\n documents = [self.data_inputs.to_lc_document()]\n else:\n try:\n documents = [input_.to_lc_document() for input_ in self.data_inputs if isinstance(input_, Data)]\n if not documents:\n msg = f\"No valid Data inputs found in {type(self.data_inputs)}\"\n raise TypeError(msg)\n except AttributeError as e:\n msg = f\"Invalid input type in collection: {e}\"\n raise TypeError(msg) from e\n try:\n splitter = CharacterTextSplitter(\n chunk_overlap=self.chunk_overlap,\n chunk_size=self.chunk_size,\n separator=separator,\n )\n return splitter.split_documents(documents)\n except Exception as e:\n msg = f\"Error splitting text: {e}\"\n raise TypeError(msg) from e\n\n def split_text(self) -> list[Data]:\n return 
self._docs_to_data(self.split_text_base())\n\n def as_dataframe(self) -> DataFrame:\n return self._docs_to_dataframe(self.split_text_base())\n" + "value": "from langchain_text_splitters import CharacterTextSplitter\n\nfrom langflow.custom import Component\nfrom langflow.io import DropdownInput, HandleInput, IntInput, MessageTextInput, Output\nfrom langflow.schema import Data, DataFrame\nfrom langflow.utils.util import unescape_string\n\n\nclass SplitTextComponent(Component):\n display_name: str = \"Split Text\"\n description: str = \"Split text into chunks based on specified criteria.\"\n icon = \"scissors-line-dashed\"\n name = \"SplitText\"\n\n inputs = [\n HandleInput(\n name=\"data_inputs\",\n display_name=\"Data or DataFrame\",\n info=\"The data with texts to split in chunks.\",\n input_types=[\"Data\", \"DataFrame\"],\n required=True,\n ),\n IntInput(\n name=\"chunk_overlap\",\n display_name=\"Chunk Overlap\",\n info=\"Number of characters to overlap between chunks.\",\n value=200,\n ),\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n info=(\n \"The maximum length of each chunk. Text is first split by separator, \"\n \"then chunks are merged up to this size. \"\n \"Individual splits larger than this won't be further divided.\"\n ),\n value=1000,\n ),\n MessageTextInput(\n name=\"separator\",\n display_name=\"Separator\",\n info=(\n \"The character to split on. Use \\\\n for newline. \"\n \"Examples: \\\\n\\\\n for paragraphs, \\\\n for lines, . 
for sentences\"\n ),\n value=\"\\n\",\n ),\n MessageTextInput(\n name=\"text_key\",\n display_name=\"Text Key\",\n info=\"The key to use for the text column.\",\n value=\"text\",\n advanced=True,\n ),\n DropdownInput(\n name=\"keep_separator\",\n display_name=\"Keep Separator\",\n info=\"Whether to keep the separator in the output chunks and where to place it.\",\n options=[\"False\", \"True\", \"Start\", \"End\"],\n value=\"False\",\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Chunks\", name=\"chunks\", method=\"split_text\"),\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"as_dataframe\"),\n ]\n\n def _docs_to_data(self, docs) -> list[Data]:\n return [Data(text=doc.page_content, data=doc.metadata) for doc in docs]\n\n def _fix_separator(self, separator: str) -> str:\n \"\"\"Fix common separator issues and convert to proper format.\"\"\"\n if separator == \"/n\":\n return \"\\n\"\n if separator == \"/t\":\n return \"\\t\"\n return separator\n\n def split_text_base(self):\n separator = self._fix_separator(self.separator)\n separator = unescape_string(separator)\n\n if isinstance(self.data_inputs, DataFrame):\n if not len(self.data_inputs):\n msg = \"DataFrame is empty\"\n raise TypeError(msg)\n\n self.data_inputs.text_key = self.text_key\n try:\n documents = self.data_inputs.to_lc_documents()\n except Exception as e:\n msg = f\"Error converting DataFrame to documents: {e}\"\n raise TypeError(msg) from e\n else:\n if not self.data_inputs:\n msg = \"No data inputs provided\"\n raise TypeError(msg)\n\n documents = []\n if isinstance(self.data_inputs, Data):\n self.data_inputs.text_key = self.text_key\n documents = [self.data_inputs.to_lc_document()]\n else:\n try:\n documents = [input_.to_lc_document() for input_ in self.data_inputs if isinstance(input_, Data)]\n if not documents:\n msg = f\"No valid Data inputs found in {type(self.data_inputs)}\"\n raise TypeError(msg)\n except AttributeError as e:\n msg = f\"Invalid input type 
in collection: {e}\"\n raise TypeError(msg) from e\n try:\n # Convert string 'False'/'True' to boolean\n keep_sep = self.keep_separator\n if isinstance(keep_sep, str):\n if keep_sep.lower() == \"false\":\n keep_sep = False\n elif keep_sep.lower() == \"true\":\n keep_sep = True\n # 'start' and 'end' are kept as strings\n\n splitter = CharacterTextSplitter(\n chunk_overlap=self.chunk_overlap,\n chunk_size=self.chunk_size,\n separator=separator,\n keep_separator=keep_sep,\n )\n return splitter.split_documents(documents)\n except Exception as e:\n msg = f\"Error splitting text: {e}\"\n raise TypeError(msg) from e\n\n def split_text(self) -> list[Data]:\n return self._docs_to_data(self.split_text_base())\n\n def as_dataframe(self) -> DataFrame:\n return DataFrame(self.split_text())\n" }, "data_inputs": { "advanced": false, - "display_name": "Input Documents", + "display_name": "Data or DataFrame", "dynamic": false, - "info": "The data to split.", + "info": "The data with texts to split in chunks.", "input_types": [ "Data", "DataFrame" @@ -1021,11 +1021,36 @@ "type": "other", "value": "" }, + "keep_separator": { + "_input_type": "DropdownInput", + "advanced": true, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Keep Separator", + "dynamic": false, + "info": "Whether to keep the separator in the output chunks and where to place it.", + "name": "keep_separator", + "options": [ + "False", + "True", + "Start", + "End" + ], + "options_metadata": [], + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "str", + "value": "False" + }, "separator": { "advanced": false, "display_name": "Separator", "dynamic": false, - "info": "The character to split on. Defaults to newline.", + "info": "The character to split on. Use \\n for newline. Examples: \\n\\n for paragraphs, \\n for lines, . 
for sentences", "input_types": [ "Message" ], diff --git a/src/frontend/tests/core/features/freeze.spec.ts b/src/frontend/tests/core/features/freeze.spec.ts index 2c7eab80e..545c6bbac 100644 --- a/src/frontend/tests/core/features/freeze.spec.ts +++ b/src/frontend/tests/core/features/freeze.spec.ts @@ -119,7 +119,7 @@ test( await urlOutput.hover(); await page.mouse.down(); const splitTextInputData = await page.getByTestId( - "handle-splittext-shownode-input documents-left", + "handle-splittext-shownode-data or dataframe-left", ); await splitTextInputData.hover(); await page.mouse.up(); diff --git a/src/frontend/tests/core/features/stop-building.spec.ts b/src/frontend/tests/core/features/stop-building.spec.ts index 9a73cc97b..cf7b525fc 100644 --- a/src/frontend/tests/core/features/stop-building.spec.ts +++ b/src/frontend/tests/core/features/stop-building.spec.ts @@ -82,7 +82,7 @@ test( await urlOutput.hover(); await page.mouse.down(); const splitTextInputData = await page.getByTestId( - "handle-splittext-shownode-input documents-left", + "handle-splittext-shownode-data or dataframe-left", ); await splitTextInputData.hover(); await page.mouse.up(); diff --git a/src/frontend/tests/extended/features/stop-button-playground.spec.ts b/src/frontend/tests/extended/features/stop-button-playground.spec.ts index fcff9a0ac..fdfdf774d 100644 --- a/src/frontend/tests/extended/features/stop-button-playground.spec.ts +++ b/src/frontend/tests/extended/features/stop-button-playground.spec.ts @@ -25,7 +25,9 @@ test( await page.getByTitle("fit view").click(); await page.getByTestId("sidebar-search-input").click(); + await page.waitForTimeout(500); await page.getByTestId("sidebar-search-input").fill("chat output"); + await page.waitForTimeout(500); await page.waitForSelector('[data-testid="outputsChat Output"]', { timeout: 3000,