From e8529eaecb45312ccf4038a171dcb0a8446bdcb2 Mon Sep 17 00:00:00 2001 From: Edwin Jose Date: Wed, 19 Feb 2025 15:40:56 -0500 Subject: [PATCH] feat: add support to accept Dataframe as input to split text, and added relevant tests (#6302) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * update to support dataframe * [autofix.ci] apply automated fixes * [autofix.ci] apply automated fixes (attempt 2/3) * Update split_text.py * [autofix.ci] apply automated fixes * [autofix.ci] apply automated fixes * update names * Update src/backend/base/langflow/schema/dataframe.py Co-authored-by: Gabriel Luiz Freitas Almeida * [autofix.ci] apply automated fixes * update to template * update review changes * Update Vector Store RAG.json * fix lint errors * fix tests * 📝 (freeze.spec.ts): update test description to match the actual element being tested for better clarity and accuracy * ✨ (stop-button-playground.spec.ts): improve test reliability by specifying target position for drag action to prevent flakiness * ✅ (logs.spec.ts): increase timeout from 1000ms to 3000ms for better test reliability ✅ (stop-building.spec.ts): update test selector from "handle-splittext-shownode-data inputs-left" to "handle-splittext-shownode-input documents-left" for accurate testing ✅ (starter-projects.spec.ts): add a 1000ms timeout before asserting visibility of an element for better test stability --------- Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> Co-authored-by: Gabriel Luiz Freitas Almeida Co-authored-by: cristhianzl --- .../components/processing/split_text.py | 72 +- .../starter_projects/Vector Store RAG.json | 1828 +++++++++-------- src/backend/base/langflow/schema/dataframe.py | 74 +- .../processing/test_split_text_component.py | 52 + .../unit/schema/test_schema_dataframe.py | 66 + src/frontend/package-lock.json | 1 + .../tests/core/features/freeze.spec.ts | 2 +- src/frontend/tests/core/features/logs.spec.ts | 2 +- .../tests/core/features/stop-building.spec.ts | 2 +- .../features/starter-projects.spec.ts | 2 + .../features/stop-button-playground.spec.ts | 7 +- 11 files changed, 1184 insertions(+), 924 deletions(-) create mode 100644 src/backend/tests/unit/schema/test_schema_dataframe.py diff --git a/src/backend/base/langflow/components/processing/split_text.py b/src/backend/base/langflow/components/processing/split_text.py index 870b6328d..e6118f118 100644 --- a/src/backend/base/langflow/components/processing/split_text.py +++ b/src/backend/base/langflow/components/processing/split_text.py @@ -15,10 +15,9 @@ class SplitTextComponent(Component): inputs = [ HandleInput( name="data_inputs", - display_name="Data Inputs", + display_name="Input Documents", info="The data to split.", - input_types=["Data"], - is_list=True, + input_types=["Data", "DataFrame"], required=True, ), IntInput( @@ -39,6 +38,13 @@ class SplitTextComponent(Component): info="The character to split on. 
Defaults to newline.", value="\n", ), + MessageTextInput( + name="text_key", + display_name="Text Key", + info="The key to use for the text column.", + value="text", + advanced=True, + ), ] outputs = [ @@ -46,23 +52,57 @@ class SplitTextComponent(Component): Output(display_name="DataFrame", name="dataframe", method="as_dataframe"), ] - def _docs_to_data(self, docs): + def _docs_to_data(self, docs) -> list[Data]: return [Data(text=doc.page_content, data=doc.metadata) for doc in docs] - def split_text(self) -> list[Data]: + def _docs_to_dataframe(self, docs): + data_dicts = [{self.text_key: doc.page_content, **doc.metadata} for doc in docs] + return DataFrame(data_dicts) + + def split_text_base(self): separator = unescape_string(self.separator) + if isinstance(self.data_inputs, DataFrame): + if not len(self.data_inputs): + msg = "DataFrame is empty" + raise TypeError(msg) - documents = [_input.to_lc_document() for _input in self.data_inputs if isinstance(_input, Data)] + self.data_inputs.text_key = self.text_key + try: + documents = self.data_inputs.to_lc_documents() + except Exception as e: + msg = f"Error converting DataFrame to documents: {e}" + raise TypeError(msg) from e + else: + if not self.data_inputs: + msg = "No data inputs provided" + raise TypeError(msg) - splitter = CharacterTextSplitter( - chunk_overlap=self.chunk_overlap, - chunk_size=self.chunk_size, - separator=separator, - ) - docs = splitter.split_documents(documents) - data = self._docs_to_data(docs) - self.status = data - return data + documents = [] + if isinstance(self.data_inputs, Data): + self.data_inputs.text_key = self.text_key + documents = [self.data_inputs.to_lc_document()] + else: + try: + documents = [input_.to_lc_document() for input_ in self.data_inputs if isinstance(input_, Data)] + if not documents: + msg = f"No valid Data inputs found in {type(self.data_inputs)}" + raise TypeError(msg) + except AttributeError as e: + msg = f"Invalid input type in collection: {e}" + raise TypeError(msg) from e + try: + splitter = CharacterTextSplitter( + chunk_overlap=self.chunk_overlap, + chunk_size=self.chunk_size, + separator=separator, + ) + return splitter.split_documents(documents) + except Exception as e: + msg = f"Error splitting text: {e}" + raise TypeError(msg) from e + + def split_text(self) -> list[Data]: + return self._docs_to_data(self.split_text_base()) def as_dataframe(self) -> DataFrame: - return DataFrame(self.split_text()) + return self._docs_to_dataframe(self.split_text_base()) diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json b/src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json index 585ca5a77..9c578f59c 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json @@ -7,7 +7,7 @@ "data": { "sourceHandle": { "dataType": "ParseData", - "id": "ParseData-mnAoF", + "id": "ParseData-r0bSI", "name": "text", "output_types": [ "Message" @@ -15,7 +15,7 @@ }, "targetHandle": { "fieldName": "context", - "id": "Prompt-hdHKZ", + "id": "Prompt-kTvlf", "inputTypes": [ "Message", "Text" @@ -23,11 +23,11 @@ "type": "str" } }, - "id": "reactflow__edge-ParseData-mnAoF{œdataTypeœ:œParseDataœ,œidœ:œParseData-mnAoFœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-Prompt-hdHKZ{œfieldNameœ:œcontextœ,œidœ:œPrompt-hdHKZœ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}", - "source": "ParseData-mnAoF", - "sourceHandle": "{œdataTypeœ: œParseDataœ, œidœ: 
œParseData-mnAoFœ, œnameœ: œtextœ, œoutput_typesœ: [œMessageœ]}", - "target": "Prompt-hdHKZ", - "targetHandle": "{œfieldNameœ: œcontextœ, œidœ: œPrompt-hdHKZœ, œinputTypesœ: [œMessageœ, œTextœ], œtypeœ: œstrœ}" + "id": "reactflow__edge-ParseData-r0bSI{œdataTypeœ:œParseDataœ,œidœ:œParseData-r0bSIœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-Prompt-kTvlf{œfieldNameœ:œcontextœ,œidœ:œPrompt-kTvlfœ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}", + "source": "ParseData-r0bSI", + "sourceHandle": "{œdataTypeœ: œParseDataœ, œidœ: œParseData-r0bSIœ, œnameœ: œtextœ, œoutput_typesœ: [œMessageœ]}", + "target": "Prompt-kTvlf", + "targetHandle": "{œfieldNameœ: œcontextœ, œidœ: œPrompt-kTvlfœ, œinputTypesœ: [œMessageœ, œTextœ], œtypeœ: œstrœ}" }, { "animated": false, @@ -35,7 +35,7 @@ "data": { "sourceHandle": { "dataType": "ChatInput", - "id": "ChatInput-8NQlz", + "id": "ChatInput-rfENu", "name": "message", "output_types": [ "Message" @@ -43,7 +43,7 @@ }, "targetHandle": { "fieldName": "question", - "id": "Prompt-hdHKZ", + "id": "Prompt-kTvlf", "inputTypes": [ "Message", "Text" @@ -51,11 +51,11 @@ "type": "str" } }, - "id": "reactflow__edge-ChatInput-8NQlz{œdataTypeœ:œChatInputœ,œidœ:œChatInput-8NQlzœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}-Prompt-hdHKZ{œfieldNameœ:œquestionœ,œidœ:œPrompt-hdHKZœ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}", - "source": "ChatInput-8NQlz", - "sourceHandle": "{œdataTypeœ: œChatInputœ, œidœ: œChatInput-8NQlzœ, œnameœ: œmessageœ, œoutput_typesœ: [œMessageœ]}", - "target": "Prompt-hdHKZ", - "targetHandle": "{œfieldNameœ: œquestionœ, œidœ: œPrompt-hdHKZœ, œinputTypesœ: [œMessageœ, œTextœ], œtypeœ: œstrœ}" + "id": "reactflow__edge-ChatInput-rfENu{œdataTypeœ:œChatInputœ,œidœ:œChatInput-rfENuœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}-Prompt-kTvlf{œfieldNameœ:œquestionœ,œidœ:œPrompt-kTvlfœ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}", + "source": "ChatInput-rfENu", + "sourceHandle": "{œdataTypeœ: œChatInputœ, œidœ: œChatInput-rfENuœ, œnameœ: œmessageœ, œoutput_typesœ: [œMessageœ]}", + "target": "Prompt-kTvlf", + "targetHandle": "{œfieldNameœ: œquestionœ, œidœ: œPrompt-kTvlfœ, œinputTypesœ: [œMessageœ, œTextœ], œtypeœ: œstrœ}" }, { "animated": false, @@ -63,7 +63,7 @@ "data": { "sourceHandle": { "dataType": "File", - "id": "File-7fTNc", + "id": "File-HapKo", "name": "data", "output_types": [ "Data" @@ -71,25 +71,26 @@ }, "targetHandle": { "fieldName": "data_inputs", - "id": "SplitText-oI0KK", + "id": "SplitText-1BFPH", "inputTypes": [ - "Data" + "Data", + "DataFrame" ], "type": "other" } }, - "id": "reactflow__edge-File-7fTNc{œdataTypeœ:œFileœ,œidœ:œFile-7fTNcœ,œnameœ:œdataœ,œoutput_typesœ:[œDataœ]}-SplitText-oI0KK{œfieldNameœ:œdata_inputsœ,œidœ:œSplitText-oI0KKœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", - "source": "File-7fTNc", - "sourceHandle": "{œdataTypeœ: œFileœ, œidœ: œFile-7fTNcœ, œnameœ: œdataœ, œoutput_typesœ: [œDataœ]}", - "target": "SplitText-oI0KK", - "targetHandle": "{œfieldNameœ: œdata_inputsœ, œidœ: œSplitText-oI0KKœ, œinputTypesœ: [œDataœ], œtypeœ: œotherœ}" + "id": "reactflow__edge-File-HapKo{œdataTypeœ:œFileœ,œidœ:œFile-HapKoœ,œnameœ:œdataœ,œoutput_typesœ:[œDataœ]}-SplitText-1BFPH{œfieldNameœ:œdata_inputsœ,œidœ:œSplitText-1BFPHœ,œinputTypesœ:[œDataœ,œDataFrameœ],œtypeœ:œotherœ}", + "source": "File-HapKo", + "sourceHandle": "{œdataTypeœ: œFileœ, œidœ: œFile-HapKoœ, œnameœ: œdataœ, œoutput_typesœ: [œDataœ]}", + "target": "SplitText-1BFPH", + "targetHandle": "{œfieldNameœ: œdata_inputsœ, œidœ: œSplitText-1BFPHœ, œinputTypesœ: [œDataœ, œDataFrameœ], œtypeœ: 
œotherœ}" }, { "className": "", "data": { "sourceHandle": { "dataType": "Prompt", - "id": "Prompt-hdHKZ", + "id": "Prompt-kTvlf", "name": "prompt", "output_types": [ "Message" @@ -97,154 +98,25 @@ }, "targetHandle": { "fieldName": "input_value", - "id": "OpenAIModel-690Qx", + "id": "OpenAIModel-EseEF", "inputTypes": [ "Message" ], "type": "str" } }, - "id": "reactflow__edge-Prompt-hdHKZ{œdataTypeœ:œPromptœ,œidœ:œPrompt-hdHKZœ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}-OpenAIModel-690Qx{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-690Qxœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", - "source": "Prompt-hdHKZ", - "sourceHandle": "{œdataTypeœ: œPromptœ, œidœ: œPrompt-hdHKZœ, œnameœ: œpromptœ, œoutput_typesœ: [œMessageœ]}", - "target": "OpenAIModel-690Qx", - "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œOpenAIModel-690Qxœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}" + "id": "reactflow__edge-Prompt-kTvlf{œdataTypeœ:œPromptœ,œidœ:œPrompt-kTvlfœ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}-OpenAIModel-EseEF{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-EseEFœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "source": "Prompt-kTvlf", + "sourceHandle": "{œdataTypeœ: œPromptœ, œidœ: œPrompt-kTvlfœ, œnameœ: œpromptœ, œoutput_typesœ: [œMessageœ]}", + "target": "OpenAIModel-EseEF", + "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œOpenAIModel-EseEFœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}" }, { "className": "", - "data": { - "sourceHandle": { - "dataType": "OpenAIEmbeddings", - "id": "OpenAIEmbeddings-u7coL", - "name": "embeddings", - "output_types": [ - "Embeddings" - ] - }, - "targetHandle": { - "fieldName": "embedding_model", - "id": "AstraDB-UZ35f", - "inputTypes": [ - "Embeddings" - ], - "type": "other" - } - }, - "id": "reactflow__edge-OpenAIEmbeddings-u7coL{œdataTypeœ:œOpenAIEmbeddingsœ,œidœ:œOpenAIEmbeddings-u7coLœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-AstraDB-UZ35f{œfieldNameœ:œembedding_modelœ,œidœ:œAstraDB-UZ35fœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}", - "source": "OpenAIEmbeddings-u7coL", - "sourceHandle": "{œdataTypeœ: œOpenAIEmbeddingsœ, œidœ: œOpenAIEmbeddings-u7coLœ, œnameœ: œembeddingsœ, œoutput_typesœ: [œEmbeddingsœ]}", - "target": "AstraDB-UZ35f", - "targetHandle": "{œfieldNameœ: œembedding_modelœ, œidœ: œAstraDB-UZ35fœ, œinputTypesœ: [œEmbeddingsœ], œtypeœ: œotherœ}" - }, - { - "className": "", - "data": { - "sourceHandle": { - "dataType": "SplitText", - "id": "SplitText-oI0KK", - "name": "chunks", - "output_types": [ - "Data" - ] - }, - "targetHandle": { - "fieldName": "ingest_data", - "id": "AstraDB-UZ35f", - "inputTypes": [ - "Data" - ], - "type": "other" - } - }, - "id": "reactflow__edge-SplitText-oI0KK{œdataTypeœ:œSplitTextœ,œidœ:œSplitText-oI0KKœ,œnameœ:œchunksœ,œoutput_typesœ:[œDataœ]}-AstraDB-UZ35f{œfieldNameœ:œingest_dataœ,œidœ:œAstraDB-UZ35fœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", - "source": "SplitText-oI0KK", - "sourceHandle": "{œdataTypeœ: œSplitTextœ, œidœ: œSplitText-oI0KKœ, œnameœ: œchunksœ, œoutput_typesœ: [œDataœ]}", - "target": "AstraDB-UZ35f", - "targetHandle": "{œfieldNameœ: œingest_dataœ, œidœ: œAstraDB-UZ35fœ, œinputTypesœ: [œDataœ], œtypeœ: œotherœ}" - }, - { - "className": "", - "data": { - "sourceHandle": { - "dataType": "ChatInput", - "id": "ChatInput-8NQlz", - "name": "message", - "output_types": [ - "Message" - ] - }, - "targetHandle": { - "fieldName": "search_query", - "id": "AstraDB-Etytu", - "inputTypes": [ - "Message" - ], - "type": "str" - } - }, - "id": 
"reactflow__edge-ChatInput-8NQlz{œdataTypeœ:œChatInputœ,œidœ:œChatInput-8NQlzœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}-AstraDB-Etytu{œfieldNameœ:œsearch_queryœ,œidœ:œAstraDB-Etytuœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", - "source": "ChatInput-8NQlz", - "sourceHandle": "{œdataTypeœ: œChatInputœ, œidœ: œChatInput-8NQlzœ, œnameœ: œmessageœ, œoutput_typesœ: [œMessageœ]}", - "target": "AstraDB-Etytu", - "targetHandle": "{œfieldNameœ: œsearch_queryœ, œidœ: œAstraDB-Etytuœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}" - }, - { - "className": "", - "data": { - "sourceHandle": { - "dataType": "OpenAIEmbeddings", - "id": "OpenAIEmbeddings-eerk9", - "name": "embeddings", - "output_types": [ - "Embeddings" - ] - }, - "targetHandle": { - "fieldName": "embedding_model", - "id": "AstraDB-Etytu", - "inputTypes": [ - "Embeddings" - ], - "type": "other" - } - }, - "id": "reactflow__edge-OpenAIEmbeddings-eerk9{œdataTypeœ:œOpenAIEmbeddingsœ,œidœ:œOpenAIEmbeddings-eerk9œ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-AstraDB-Etytu{œfieldNameœ:œembedding_modelœ,œidœ:œAstraDB-Etytuœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}", - "source": "OpenAIEmbeddings-eerk9", - "sourceHandle": "{œdataTypeœ: œOpenAIEmbeddingsœ, œidœ: œOpenAIEmbeddings-eerk9œ, œnameœ: œembeddingsœ, œoutput_typesœ: [œEmbeddingsœ]}", - "target": "AstraDB-Etytu", - "targetHandle": "{œfieldNameœ: œembedding_modelœ, œidœ: œAstraDB-Etytuœ, œinputTypesœ: [œEmbeddingsœ], œtypeœ: œotherœ}" - }, - { - "className": "", - "data": { - "sourceHandle": { - "dataType": "AstraDB", - "id": "AstraDB-Etytu", - "name": "search_results", - "output_types": [ - "Data" - ] - }, - "targetHandle": { - "fieldName": "data", - "id": "ParseData-mnAoF", - "inputTypes": [ - "Data" - ], - "type": "other" - } - }, - "id": "reactflow__edge-AstraDB-Etytu{œdataTypeœ:œAstraDBœ,œidœ:œAstraDB-Etytuœ,œnameœ:œsearch_resultsœ,œoutput_typesœ:[œDataœ]}-ParseData-mnAoF{œfieldNameœ:œdataœ,œidœ:œParseData-mnAoFœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", - "source": "AstraDB-Etytu", - "sourceHandle": "{œdataTypeœ: œAstraDBœ, œidœ: œAstraDB-Etytuœ, œnameœ: œsearch_resultsœ, œoutput_typesœ: [œDataœ]}", - "target": "ParseData-mnAoF", - "targetHandle": "{œfieldNameœ: œdataœ, œidœ: œParseData-mnAoFœ, œinputTypesœ: [œDataœ], œtypeœ: œotherœ}" - }, - { "data": { "sourceHandle": { "dataType": "OpenAIModel", - "id": "OpenAIModel-690Qx", + "id": "OpenAIModel-EseEF", "name": "text_output", "output_types": [ "Message" @@ -252,7 +124,7 @@ }, "targetHandle": { "fieldName": "input_value", - "id": "ChatOutput-zpEiC", + "id": "ChatOutput-GDLzU", "inputTypes": [ "Data", "DataFrame", @@ -261,11 +133,140 @@ "type": "str" } }, - "id": "xy-edge__OpenAIModel-690Qx{œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-690Qxœ,œnameœ:œtext_outputœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-zpEiC{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-zpEiCœ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œstrœ}", - "source": "OpenAIModel-690Qx", - "sourceHandle": "{œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-690Qxœ, œnameœ: œtext_outputœ, œoutput_typesœ: [œMessageœ]}", - "target": "ChatOutput-zpEiC", - "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œChatOutput-zpEiCœ, œinputTypesœ: [œDataœ, œDataFrameœ, œMessageœ], œtypeœ: œstrœ}" + "id": "reactflow__edge-OpenAIModel-EseEF{œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-EseEFœ,œnameœ:œtext_outputœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-GDLzU{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-GDLzUœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "source": "OpenAIModel-EseEF", + "sourceHandle": 
"{œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-EseEFœ, œnameœ: œtext_outputœ, œoutput_typesœ: [œMessageœ]}", + "target": "ChatOutput-GDLzU", + "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œChatOutput-GDLzUœ, œinputTypesœ: [œDataœ, œDataFrameœ, œMessageœ], œtypeœ: œstrœ}" + }, + { + "className": "", + "data": { + "sourceHandle": { + "dataType": "OpenAIEmbeddings", + "id": "OpenAIEmbeddings-pjVd0", + "name": "embeddings", + "output_types": [ + "Embeddings" + ] + }, + "targetHandle": { + "fieldName": "embedding_model", + "id": "AstraDB-XU7Xo", + "inputTypes": [ + "Embeddings" + ], + "type": "other" + } + }, + "id": "reactflow__edge-OpenAIEmbeddings-pjVd0{œdataTypeœ:œOpenAIEmbeddingsœ,œidœ:œOpenAIEmbeddings-pjVd0œ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-AstraDB-XU7Xo{œfieldNameœ:œembedding_modelœ,œidœ:œAstraDB-XU7Xoœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}", + "source": "OpenAIEmbeddings-pjVd0", + "sourceHandle": "{œdataTypeœ: œOpenAIEmbeddingsœ, œidœ: œOpenAIEmbeddings-pjVd0œ, œnameœ: œembeddingsœ, œoutput_typesœ: [œEmbeddingsœ]}", + "target": "AstraDB-XU7Xo", + "targetHandle": "{œfieldNameœ: œembedding_modelœ, œidœ: œAstraDB-XU7Xoœ, œinputTypesœ: [œEmbeddingsœ], œtypeœ: œotherœ}" + }, + { + "className": "", + "data": { + "sourceHandle": { + "dataType": "ChatInput", + "id": "ChatInput-rfENu", + "name": "message", + "output_types": [ + "Message" + ] + }, + "targetHandle": { + "fieldName": "search_query", + "id": "AstraDB-XU7Xo", + "inputTypes": [ + "Message" + ], + "type": "str" + } + }, + "id": "reactflow__edge-ChatInput-rfENu{œdataTypeœ:œChatInputœ,œidœ:œChatInput-rfENuœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}-AstraDB-XU7Xo{œfieldNameœ:œsearch_queryœ,œidœ:œAstraDB-XU7Xoœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "source": "ChatInput-rfENu", + "sourceHandle": "{œdataTypeœ: œChatInputœ, œidœ: œChatInput-rfENuœ, œnameœ: œmessageœ, œoutput_typesœ: [œMessageœ]}", + "target": "AstraDB-XU7Xo", + "targetHandle": "{œfieldNameœ: œsearch_queryœ, œidœ: œAstraDB-XU7Xoœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}" + }, + { + "className": "", + "data": { + "sourceHandle": { + "dataType": "AstraDB", + "id": "AstraDB-XU7Xo", + "name": "search_results", + "output_types": [ + "Data" + ] + }, + "targetHandle": { + "fieldName": "data", + "id": "ParseData-r0bSI", + "inputTypes": [ + "Data" + ], + "type": "other" + } + }, + "id": "reactflow__edge-AstraDB-XU7Xo{œdataTypeœ:œAstraDBœ,œidœ:œAstraDB-XU7Xoœ,œnameœ:œsearch_resultsœ,œoutput_typesœ:[œDataœ]}-ParseData-r0bSI{œfieldNameœ:œdataœ,œidœ:œParseData-r0bSIœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", + "source": "AstraDB-XU7Xo", + "sourceHandle": "{œdataTypeœ: œAstraDBœ, œidœ: œAstraDB-XU7Xoœ, œnameœ: œsearch_resultsœ, œoutput_typesœ: [œDataœ]}", + "target": "ParseData-r0bSI", + "targetHandle": "{œfieldNameœ: œdataœ, œidœ: œParseData-r0bSIœ, œinputTypesœ: [œDataœ], œtypeœ: œotherœ}" + }, + { + "className": "", + "data": { + "sourceHandle": { + "dataType": "OpenAIEmbeddings", + "id": "OpenAIEmbeddings-jn7pG", + "name": "embeddings", + "output_types": [ + "Embeddings" + ] + }, + "targetHandle": { + "fieldName": "embedding_model", + "id": "AstraDB-fyg6q", + "inputTypes": [ + "Embeddings" + ], + "type": "other" + } + }, + "id": "reactflow__edge-OpenAIEmbeddings-jn7pG{œdataTypeœ:œOpenAIEmbeddingsœ,œidœ:œOpenAIEmbeddings-jn7pGœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-AstraDB-fyg6q{œfieldNameœ:œembedding_modelœ,œidœ:œAstraDB-fyg6qœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}", + "source": "OpenAIEmbeddings-jn7pG", + "sourceHandle": 
"{œdataTypeœ: œOpenAIEmbeddingsœ, œidœ: œOpenAIEmbeddings-jn7pGœ, œnameœ: œembeddingsœ, œoutput_typesœ: [œEmbeddingsœ]}", + "target": "AstraDB-fyg6q", + "targetHandle": "{œfieldNameœ: œembedding_modelœ, œidœ: œAstraDB-fyg6qœ, œinputTypesœ: [œEmbeddingsœ], œtypeœ: œotherœ}" + }, + { + "data": { + "sourceHandle": { + "dataType": "SplitText", + "id": "SplitText-1BFPH", + "name": "chunks", + "output_types": [ + "Data" + ] + }, + "targetHandle": { + "fieldName": "ingest_data", + "id": "AstraDB-fyg6q", + "inputTypes": [ + "Data" + ], + "type": "other" + } + }, + "id": "xy-edge__SplitText-1BFPH{œdataTypeœ:œSplitTextœ,œidœ:œSplitText-1BFPHœ,œnameœ:œchunksœ,œoutput_typesœ:[œDataœ]}-AstraDB-fyg6q{œfieldNameœ:œingest_dataœ,œidœ:œAstraDB-fyg6qœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", + "source": "SplitText-1BFPH", + "sourceHandle": "{œdataTypeœ: œSplitTextœ, œidœ: œSplitText-1BFPHœ, œnameœ: œchunksœ, œoutput_typesœ: [œDataœ]}", + "target": "AstraDB-fyg6q", + "targetHandle": "{œfieldNameœ: œingest_dataœ, œidœ: œAstraDB-fyg6qœ, œinputTypesœ: [œDataœ], œtypeœ: œotherœ}" } ], "nodes": [ @@ -273,7 +274,7 @@ "data": { "description": "Get chat inputs from the Playground.", "display_name": "Chat Input", - "id": "ChatInput-8NQlz", + "id": "ChatInput-rfENu", "node": { "base_classes": [ "Message" @@ -537,7 +538,7 @@ }, "dragging": false, "height": 234, - "id": "ChatInput-8NQlz", + "id": "ChatInput-rfENu", "measured": { "height": 234, "width": 360 @@ -558,7 +559,7 @@ "data": { "description": "Convert Data into plain text following a specified template.", "display_name": "Parse Data", - "id": "ParseData-mnAoF", + "id": "ParseData-r0bSI", "node": { "base_classes": [ "Message" @@ -694,7 +695,7 @@ }, "dragging": false, "height": 350, - "id": "ParseData-mnAoF", + "id": "ParseData-r0bSI", "measured": { "height": 350, "width": 360 @@ -715,7 +716,7 @@ "data": { "description": "Create a prompt template with dynamic variables.", "display_name": "Prompt", - "id": "Prompt-hdHKZ", + "id": "Prompt-kTvlf", "node": { "base_classes": [ "Message" @@ -874,7 +875,7 @@ }, "dragging": false, "height": 433, - "id": "Prompt-hdHKZ", + "id": "Prompt-kTvlf", "measured": { "height": 433, "width": 360 @@ -895,7 +896,7 @@ "data": { "description": "Split text into chunks based on specified criteria.", "display_name": "Split Text", - "id": "SplitText-oI0KK", + "id": "SplitText-1BFPH", "node": { "base_classes": [ "Data" @@ -996,17 +997,18 @@ "show": true, "title_case": false, "type": "code", - "value": "from langchain_text_splitters import CharacterTextSplitter\n\nfrom langflow.custom import Component\nfrom langflow.io import HandleInput, IntInput, MessageTextInput, Output\nfrom langflow.schema import Data, DataFrame\nfrom langflow.utils.util import unescape_string\n\n\nclass SplitTextComponent(Component):\n display_name: str = \"Split Text\"\n description: str = \"Split text into chunks based on specified criteria.\"\n icon = \"scissors-line-dashed\"\n name = \"SplitText\"\n\n inputs = [\n HandleInput(\n name=\"data_inputs\",\n display_name=\"Data Inputs\",\n info=\"The data to split.\",\n input_types=[\"Data\"],\n is_list=True,\n required=True,\n ),\n IntInput(\n name=\"chunk_overlap\",\n display_name=\"Chunk Overlap\",\n info=\"Number of characters to overlap between chunks.\",\n value=200,\n ),\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n info=\"The maximum number of characters in each chunk.\",\n value=1000,\n ),\n MessageTextInput(\n name=\"separator\",\n display_name=\"Separator\",\n info=\"The character to split 
on. Defaults to newline.\",\n value=\"\\n\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Chunks\", name=\"chunks\", method=\"split_text\"),\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"as_dataframe\"),\n ]\n\n def _docs_to_data(self, docs):\n return [Data(text=doc.page_content, data=doc.metadata) for doc in docs]\n\n def split_text(self) -> list[Data]:\n separator = unescape_string(self.separator)\n\n documents = [_input.to_lc_document() for _input in self.data_inputs if isinstance(_input, Data)]\n\n splitter = CharacterTextSplitter(\n chunk_overlap=self.chunk_overlap,\n chunk_size=self.chunk_size,\n separator=separator,\n )\n docs = splitter.split_documents(documents)\n data = self._docs_to_data(docs)\n self.status = data\n return data\n\n def as_dataframe(self) -> DataFrame:\n return DataFrame(self.split_text())\n" + "value": "from langchain_text_splitters import CharacterTextSplitter\n\nfrom langflow.custom import Component\nfrom langflow.io import HandleInput, IntInput, MessageTextInput, Output\nfrom langflow.schema import Data, DataFrame\nfrom langflow.utils.util import unescape_string\n\n\nclass SplitTextComponent(Component):\n display_name: str = \"Split Text\"\n description: str = \"Split text into chunks based on specified criteria.\"\n icon = \"scissors-line-dashed\"\n name = \"SplitText\"\n\n inputs = [\n HandleInput(\n name=\"data_inputs\",\n display_name=\"Input Documents\",\n info=\"The data to split.\",\n input_types=[\"Data\", \"DataFrame\"],\n required=True,\n ),\n IntInput(\n name=\"chunk_overlap\",\n display_name=\"Chunk Overlap\",\n info=\"Number of characters to overlap between chunks.\",\n value=200,\n ),\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n info=\"The maximum number of characters in each chunk.\",\n value=1000,\n ),\n MessageTextInput(\n name=\"separator\",\n display_name=\"Separator\",\n info=\"The character to split on. 
Defaults to newline.\",\n value=\"\\n\",\n ),\n MessageTextInput(\n name=\"text_key\",\n display_name=\"Text Key\",\n info=\"The key to use for the text column.\",\n value=\"text\",\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Chunks\", name=\"chunks\", method=\"split_text\"),\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"as_dataframe\"),\n ]\n\n def _docs_to_data(self, docs) -> list[Data]:\n return [Data(text=doc.page_content, data=doc.metadata) for doc in docs]\n\n def _docs_to_dataframe(self, docs):\n data_dicts = [{self.text_key: doc.page_content, **doc.metadata} for doc in docs]\n return DataFrame(data_dicts)\n\n def split_text_base(self):\n separator = unescape_string(self.separator)\n if isinstance(self.data_inputs, DataFrame):\n if not len(self.data_inputs):\n msg = \"DataFrame is empty\"\n raise TypeError(msg)\n\n self.data_inputs.text_key = self.text_key\n try:\n documents = self.data_inputs.to_lc_documents()\n except Exception as e:\n msg = f\"Error converting DataFrame to documents: {e}\"\n raise TypeError(msg) from e\n else:\n if not self.data_inputs:\n msg = \"No data inputs provided\"\n raise TypeError(msg)\n\n documents = []\n if isinstance(self.data_inputs, Data):\n self.data_inputs.text_key = self.text_key\n documents = [self.data_inputs.to_lc_document()]\n else:\n try:\n documents = [input_.to_lc_document() for input_ in self.data_inputs if isinstance(input_, Data)]\n if not documents:\n msg = f\"No valid Data inputs found in {type(self.data_inputs)}\"\n raise TypeError(msg)\n except AttributeError as e:\n msg = f\"Invalid input type in collection: {e}\"\n raise TypeError(msg) from e\n try:\n splitter = CharacterTextSplitter(\n chunk_overlap=self.chunk_overlap,\n chunk_size=self.chunk_size,\n separator=separator,\n )\n return splitter.split_documents(documents)\n except Exception as e:\n msg = f\"Error splitting text: {e}\"\n raise TypeError(msg) from e\n\n def split_text(self) -> list[Data]:\n return self._docs_to_data(self.split_text_base())\n\n def as_dataframe(self) -> DataFrame:\n return self._docs_to_dataframe(self.split_text_base())\n" }, "data_inputs": { "advanced": false, - "display_name": "Data Inputs", + "display_name": "Input Documents", "dynamic": false, "info": "The data to split.", "input_types": [ - "Data" + "Data", + "DataFrame" ], - "list": true, + "list": false, "name": "data_inputs", "placeholder": "", "required": true, @@ -1035,6 +1037,29 @@ "trace_as_metadata": true, "type": "str", "value": "\n" + }, + "text_key": { + "_input_type": "MessageTextInput", + "advanced": true, + "display_name": "Text Key", + "dynamic": false, + "info": "The key to use for the text column.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "text_key", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "type": "str", + "value": "text" } } }, @@ -1042,7 +1067,7 @@ }, "dragging": false, "height": 475, - "id": "SplitText-oI0KK", + "id": "SplitText-1BFPH", "measured": { "height": 475, "width": 360 @@ -1061,7 +1086,7 @@ }, { "data": { - "id": "note-8MY1o", + "id": "note-j1WAV", "node": { "description": "## 🐕 2. 
Retriever Flow\n\nThis flow answers your questions with contextual data retrieved from your vector database.\n\nOpen the **Playground** and ask, \n\n```\nWhat is this document about?\n```\n", "display_name": "", @@ -1074,7 +1099,7 @@ }, "dragging": false, "height": 324, - "id": "note-8MY1o", + "id": "note-j1WAV", "measured": { "height": 324, "width": 328 @@ -1098,7 +1123,7 @@ }, { "data": { - "id": "note-gi1uB", + "id": "note-ab4Wr", "node": { "description": "## 📖 README\n\nLoad your data into a vector database with the 📚 **Load Data** flow, and then use your data as chat context with the 🐕 **Retriever** flow.\n\n**🚨 Add your OpenAI API key as a global variable to easily add it to all of the OpenAI components in this flow.** \n\n**Quick start**\n1. Run the 📚 **Load Data** flow.\n2. Run the 🐕 **Retriever** flow.\n\n**Next steps** \n\n- Experiment by changing the prompt and the loaded data to see how the bot's responses change. \n\nFor more info, see the [Langflow docs](https://docs.langflow.org/starter-projects-vector-store-rag).", "display_name": "Read Me", @@ -1111,7 +1136,7 @@ }, "dragging": false, "height": 324, - "id": "note-gi1uB", + "id": "note-ab4Wr", "measured": { "height": 324, "width": 328 @@ -1137,7 +1162,7 @@ "data": { "description": "Display a chat message in the Playground.", "display_name": "Chat Output", - "id": "ChatOutput-zpEiC", + "id": "ChatOutput-GDLzU", "node": { "base_classes": [ "Message" @@ -1419,7 +1444,7 @@ }, "dragging": false, "height": 234, - "id": "ChatOutput-zpEiC", + "id": "ChatOutput-GDLzU", "measured": { "height": 234, "width": 360 @@ -1438,7 +1463,7 @@ }, { "data": { - "id": "OpenAIEmbeddings-eerk9", + "id": "OpenAIEmbeddings-pjVd0", "node": { "base_classes": [ "Embeddings" @@ -1727,7 +1752,7 @@ "input_types": [ "Message" ], - "load_from_db": true, + "load_from_db": false, "name": "openai_api_key", "password": true, "placeholder": "", @@ -1918,7 +1943,7 @@ }, "dragging": false, "height": 320, - "id": "OpenAIEmbeddings-eerk9", + "id": "OpenAIEmbeddings-pjVd0", "measured": { "height": 320, "width": 360 @@ -1937,7 +1962,7 @@ }, { "data": { - "id": "note-CW9QR", + "id": "note-9zl9H", "node": { "description": "## 📚 1. Load Data Flow\n\nRun this first! Load data from a local file and embed it into the vector database.\n\nSelect a Database and a Collection, or create new ones. 
\n\nClick ▶️ **Run component** on the **Astra DB** component to load your data.\n\n* If you're using OSS Langflow, add your Astra DB Application Token to the Astra DB component.\n\n#### Next steps:\n Experiment by changing the prompt and the contextual data to see how the retrieval flow's responses change.", "display_name": "", @@ -1950,7 +1975,7 @@ }, "dragging": false, "height": 324, - "id": "note-CW9QR", + "id": "note-9zl9H", "measured": { "height": 324, "width": 328 @@ -1974,7 +1999,7 @@ }, { "data": { - "id": "OpenAIEmbeddings-u7coL", + "id": "OpenAIEmbeddings-jn7pG", "node": { "base_classes": [ "Embeddings" @@ -2263,7 +2288,7 @@ "input_types": [ "Message" ], - "load_from_db": true, + "load_from_db": false, "name": "openai_api_key", "password": true, "placeholder": "", @@ -2454,7 +2479,7 @@ }, "dragging": false, "height": 320, - "id": "OpenAIEmbeddings-u7coL", + "id": "OpenAIEmbeddings-jn7pG", "measured": { "height": 320, "width": 360 @@ -2473,7 +2498,7 @@ }, { "data": { - "id": "File-7fTNc", + "id": "File-HapKo", "node": { "base_classes": [ "Data" @@ -2699,7 +2724,7 @@ }, "dragging": false, "height": 367, - "id": "File-7fTNc", + "id": "File-HapKo", "measured": { "height": 367, "width": 360 @@ -2718,7 +2743,7 @@ }, { "data": { - "id": "note-WQR48", + "id": "note-ZIyRP", "node": { "description": "### 💡 Add your OpenAI API key here 👇", "display_name": "", @@ -2731,7 +2756,7 @@ }, "dragging": false, "height": 324, - "id": "note-WQR48", + "id": "note-ZIyRP", "measured": { "height": 324, "width": 326 @@ -2750,7 +2775,7 @@ }, { "data": { - "id": "note-yMoT0", + "id": "note-Bka84", "node": { "description": "### 💡 Add your OpenAI API key here 👇", "display_name": "", @@ -2763,7 +2788,7 @@ }, "dragging": false, "height": 324, - "id": "note-yMoT0", + "id": "note-Bka84", "measured": { "height": 324, "width": 326 @@ -2782,7 +2807,7 @@ }, { "data": { - "id": "note-nfo9c", + "id": "note-DbdD7", "node": { "description": "### 💡 Add your OpenAI API key here 👇", "display_name": "", @@ -2795,7 +2820,7 @@ }, "dragging": false, "height": 324, - "id": "note-nfo9c", + "id": "note-DbdD7", "measured": { "height": 324, "width": 326 @@ -2814,7 +2839,7 @@ }, { "data": { - "id": "OpenAIModel-690Qx", + "id": "OpenAIModel-EseEF", "node": { "base_classes": [ "LanguageModel", @@ -2893,7 +2918,7 @@ "input_types": [ "Message" ], - "load_from_db": true, + "load_from_db": false, "name": "api_key", "password": true, "placeholder": "", @@ -3182,7 +3207,7 @@ "type": "OpenAIModel" }, "dragging": false, - "id": "OpenAIModel-690Qx", + "id": "OpenAIModel-EseEF", "measured": { "height": 734, "width": 360 @@ -3196,687 +3221,7 @@ }, { "data": { - "id": "AstraDB-UZ35f", - "node": { - "base_classes": [ - "Data", - "DataFrame" - ], - "beta": false, - "conditional_paths": [], - "custom_fields": {}, - "description": "Ingest and search documents in Astra DB", - "display_name": "Astra DB", - "documentation": "https://docs.datastax.com/en/langflow/astra-components.html", - "edited": false, - "field_order": [ - "token", - "environment", - "database_name", - "api_endpoint", - "collection_name", - "keyspace", - "embedding_choice", - "embedding_model", - "ingest_data", - "search_query", - "number_of_results", - "search_type", - "search_score_threshold", - "advanced_search_filter", - "autodetect_collection", - "content_field", - "deletion_field", - "ignore_invalid_documents", - "astradb_vectorstore_kwargs" - ], - "frozen": false, - "icon": "AstraDB", - "legacy": false, - "metadata": {}, - "minimized": false, - "output_types": [], - "outputs": [ 
- { - "allows_loop": false, - "cache": true, - "display_name": "Search Results", - "method": "search_documents", - "name": "search_results", - "required_inputs": [ - "collection_name", - "database_name", - "token" - ], - "selected": "Data", - "tool_mode": true, - "types": [ - "Data" - ], - "value": "__UNDEFINED__" - }, - { - "allows_loop": false, - "cache": true, - "display_name": "DataFrame", - "method": "as_dataframe", - "name": "dataframe", - "required_inputs": [], - "selected": "DataFrame", - "tool_mode": true, - "types": [ - "DataFrame" - ], - "value": "__UNDEFINED__" - } - ], - "pinned": false, - "template": { - "_type": "Component", - "advanced_search_filter": { - "_input_type": "NestedDictInput", - "advanced": true, - "display_name": "Search Metadata Filter", - "dynamic": false, - "info": "Optional dictionary of filters to apply to the search query.", - "list": false, - "list_add_label": "Add More", - "name": "advanced_search_filter", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "NestedDict", - "value": {} - }, - "api_endpoint": { - "_input_type": "StrInput", - "advanced": true, - "display_name": "Astra DB API Endpoint", - "dynamic": false, - "info": "The API Endpoint for the Astra DB instance. Supercedes database selection.", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "api_endpoint", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, - "astradb_vectorstore_kwargs": { - "_input_type": "NestedDictInput", - "advanced": true, - "display_name": "AstraDBVectorStore Parameters", - "dynamic": false, - "info": "Optional dictionary of additional parameters for the AstraDBVectorStore.", - "list": false, - "list_add_label": "Add More", - "name": "astradb_vectorstore_kwargs", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "NestedDict", - "value": {} - }, - "autodetect_collection": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Autodetect Collection", - "dynamic": false, - "info": "Boolean flag to determine whether to autodetect the collection.", - "list": false, - "list_add_label": "Add More", - "name": "autodetect_collection", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "bool", - "value": true - }, - "code": { - "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", - "list": false, - "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "type": "code", - "value": "from collections import defaultdict\nfrom dataclasses import asdict, dataclass, field\n\nfrom astrapy import AstraDBAdmin, DataAPIClient, Database\nfrom astrapy.info import CollectionDescriptor\nfrom langchain_astradb import AstraDBVectorStore, CollectionVectorServiceOptions\n\nfrom langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom langflow.helpers import docs_to_data\nfrom langflow.inputs import FloatInput, NestedDictInput\nfrom langflow.io import (\n BoolInput,\n DropdownInput,\n HandleInput,\n IntInput,\n SecretStrInput,\n 
StrInput,\n)\nfrom langflow.schema import Data\nfrom langflow.utils.version import get_version_info\n\n\nclass AstraDBVectorStoreComponent(LCVectorStoreComponent):\n display_name: str = \"Astra DB\"\n description: str = \"Ingest and search documents in Astra DB\"\n documentation: str = \"https://docs.datastax.com/en/langflow/astra-components.html\"\n name = \"AstraDB\"\n icon: str = \"AstraDB\"\n\n _cached_vector_store: AstraDBVectorStore | None = None\n\n @dataclass\n class NewDatabaseInput:\n functionality: str = \"create\"\n fields: dict[str, dict] = field(\n default_factory=lambda: {\n \"data\": {\n \"node\": {\n \"name\": \"create_database\",\n \"description\": \"\",\n \"display_name\": \"Create new database\",\n \"field_order\": [\"new_database_name\", \"cloud_provider\", \"region\"],\n \"template\": {\n \"new_database_name\": StrInput(\n name=\"new_database_name\",\n display_name=\"Name\",\n info=\"Name of the new database to create in Astra DB.\",\n required=True,\n ),\n \"cloud_provider\": DropdownInput(\n name=\"cloud_provider\",\n display_name=\"Cloud provider\",\n info=\"Cloud provider for the new database.\",\n options=[\"Amazon Web Services\", \"Google Cloud Platform\", \"Microsoft Azure\"],\n required=True,\n real_time_refresh=True,\n ),\n \"region\": DropdownInput(\n name=\"region\",\n display_name=\"Region\",\n info=\"Region for the new database.\",\n options=[],\n required=True,\n ),\n },\n },\n }\n }\n )\n\n @dataclass\n class NewCollectionInput:\n functionality: str = \"create\"\n fields: dict[str, dict] = field(\n default_factory=lambda: {\n \"data\": {\n \"node\": {\n \"name\": \"create_collection\",\n \"description\": \"\",\n \"display_name\": \"Create new collection\",\n \"field_order\": [\n \"new_collection_name\",\n \"embedding_generation_provider\",\n \"embedding_generation_model\",\n \"dimension\",\n ],\n \"template\": {\n \"new_collection_name\": StrInput(\n name=\"new_collection_name\",\n display_name=\"Name\",\n info=\"Name of the new collection to create in Astra DB.\",\n required=True,\n ),\n \"embedding_generation_provider\": DropdownInput(\n name=\"embedding_generation_provider\",\n display_name=\"Embedding generation method\",\n info=\"Provider to use for generating embeddings.\",\n real_time_refresh=True,\n required=True,\n options=[\"Bring your own\", \"Nvidia\"],\n ),\n \"embedding_generation_model\": DropdownInput(\n name=\"embedding_generation_model\",\n display_name=\"Embedding model\",\n info=\"Model to use for generating embeddings.\",\n required=True,\n options=[],\n ),\n \"dimension\": IntInput(\n name=\"dimension\",\n display_name=\"Dimensions (Required only for `Bring your own`)\",\n info=\"Dimensions of the embeddings to generate.\",\n required=False,\n value=1024,\n ),\n },\n },\n }\n }\n )\n\n inputs = [\n SecretStrInput(\n name=\"token\",\n display_name=\"Astra DB Application Token\",\n info=\"Authentication token for accessing Astra DB.\",\n value=\"ASTRA_DB_APPLICATION_TOKEN\",\n required=True,\n real_time_refresh=True,\n input_types=[],\n ),\n StrInput(\n name=\"environment\",\n display_name=\"Environment\",\n info=\"The environment for the Astra DB API Endpoint.\",\n advanced=True,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"database_name\",\n display_name=\"Database\",\n info=\"The Database name for the Astra DB instance.\",\n required=True,\n refresh_button=True,\n real_time_refresh=True,\n dialog_inputs=asdict(NewDatabaseInput()),\n combobox=True,\n ),\n StrInput(\n name=\"api_endpoint\",\n display_name=\"Astra DB API 
Endpoint\",\n info=\"The API Endpoint for the Astra DB instance. Supercedes database selection.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"collection_name\",\n display_name=\"Collection\",\n info=\"The name of the collection within Astra DB where the vectors will be stored.\",\n required=True,\n refresh_button=True,\n real_time_refresh=True,\n dialog_inputs=asdict(NewCollectionInput()),\n combobox=True,\n advanced=True,\n ),\n StrInput(\n name=\"keyspace\",\n display_name=\"Keyspace\",\n info=\"Optional keyspace within Astra DB to use for the collection.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"embedding_choice\",\n display_name=\"Embedding Model or Astra Vectorize\",\n info=\"Choose an embedding model or use Astra Vectorize.\",\n options=[\"Embedding Model\", \"Astra Vectorize\"],\n value=\"Embedding Model\",\n advanced=True,\n real_time_refresh=True,\n ),\n HandleInput(\n name=\"embedding_model\",\n display_name=\"Embedding Model\",\n input_types=[\"Embeddings\"],\n info=\"Specify the Embedding Model. Not required for Astra Vectorize collections.\",\n required=False,\n ),\n *LCVectorStoreComponent.inputs,\n IntInput(\n name=\"number_of_results\",\n display_name=\"Number of Search Results\",\n info=\"Number of search results to return.\",\n advanced=True,\n value=4,\n ),\n DropdownInput(\n name=\"search_type\",\n display_name=\"Search Type\",\n info=\"Search type to use\",\n options=[\"Similarity\", \"Similarity with score threshold\", \"MMR (Max Marginal Relevance)\"],\n value=\"Similarity\",\n advanced=True,\n ),\n FloatInput(\n name=\"search_score_threshold\",\n display_name=\"Search Score Threshold\",\n info=\"Minimum similarity score threshold for search results. \"\n \"(when using 'Similarity with score threshold')\",\n value=0,\n advanced=True,\n ),\n NestedDictInput(\n name=\"advanced_search_filter\",\n display_name=\"Search Metadata Filter\",\n info=\"Optional dictionary of filters to apply to the search query.\",\n advanced=True,\n ),\n BoolInput(\n name=\"autodetect_collection\",\n display_name=\"Autodetect Collection\",\n info=\"Boolean flag to determine whether to autodetect the collection.\",\n advanced=True,\n value=True,\n ),\n StrInput(\n name=\"content_field\",\n display_name=\"Content Field\",\n info=\"Field to use as the text content field for the vector store.\",\n advanced=True,\n ),\n StrInput(\n name=\"deletion_field\",\n display_name=\"Deletion Based On Field\",\n info=\"When this parameter is provided, documents in the target collection with \"\n \"metadata field values matching the input metadata field value will be deleted \"\n \"before new data is loaded.\",\n advanced=True,\n ),\n BoolInput(\n name=\"ignore_invalid_documents\",\n display_name=\"Ignore Invalid Documents\",\n info=\"Boolean flag to determine whether to ignore invalid documents at runtime.\",\n advanced=True,\n ),\n NestedDictInput(\n name=\"astradb_vectorstore_kwargs\",\n display_name=\"AstraDBVectorStore Parameters\",\n info=\"Optional dictionary of additional parameters for the AstraDBVectorStore.\",\n advanced=True,\n ),\n ]\n\n @classmethod\n def map_cloud_providers(cls):\n # TODO: Programmatically fetch the regions for each cloud provider\n return {\n \"Amazon Web Services\": {\n \"id\": \"aws\",\n \"regions\": [\"us-east-2\", \"ap-south-1\", \"eu-west-1\"],\n },\n \"Google Cloud Platform\": {\n \"id\": \"gcp\",\n \"regions\": [\"us-east1\"],\n },\n \"Microsoft Azure\": {\n \"id\": \"azure\",\n \"regions\": [\"westus3\"],\n },\n }\n\n @classmethod\n def 
get_vectorize_providers(cls, token: str, environment: str | None = None, api_endpoint: str | None = None):\n try:\n # Get the admin object\n admin = AstraDBAdmin(token=token, environment=environment)\n db_admin = admin.get_database_admin(api_endpoint=api_endpoint)\n\n # Get the list of embedding providers\n embedding_providers = db_admin.find_embedding_providers().as_dict()\n\n vectorize_providers_mapping = {}\n # Map the provider display name to the provider key and models\n for provider_key, provider_data in embedding_providers[\"embeddingProviders\"].items():\n # Get the provider display name and models\n display_name = provider_data[\"displayName\"]\n models = [model[\"name\"] for model in provider_data[\"models\"]]\n\n # Build our mapping\n vectorize_providers_mapping[display_name] = [provider_key, models]\n\n # Sort the resulting dictionary\n return defaultdict(list, dict(sorted(vectorize_providers_mapping.items())))\n except Exception as e:\n msg = f\"Error fetching vectorize providers: {e}\"\n raise ValueError(msg) from e\n\n @classmethod\n async def create_database_api(\n cls,\n new_database_name: str,\n cloud_provider: str,\n region: str,\n token: str,\n environment: str | None = None,\n keyspace: str | None = None,\n ):\n client = DataAPIClient(token=token, environment=environment)\n\n # Get the admin object\n admin_client = client.get_admin(token=token)\n\n # Call the create database function\n return await admin_client.async_create_database(\n name=new_database_name,\n cloud_provider=cls.map_cloud_providers()[cloud_provider][\"id\"],\n region=region,\n keyspace=keyspace,\n wait_until_active=False,\n )\n\n @classmethod\n async def create_collection_api(\n cls,\n new_collection_name: str,\n token: str,\n api_endpoint: str,\n environment: str | None = None,\n keyspace: str | None = None,\n dimension: int | None = None,\n embedding_generation_provider: str | None = None,\n embedding_generation_model: str | None = None,\n ):\n # Create the data API client\n client = DataAPIClient(token=token, environment=environment)\n\n # Get the database object\n database = client.get_async_database(api_endpoint=api_endpoint, token=token)\n\n # Build vectorize options, if needed\n vectorize_options = None\n if not dimension:\n vectorize_options = CollectionVectorServiceOptions(\n provider=cls.get_vectorize_providers(\n token=token, environment=environment, api_endpoint=api_endpoint\n ).get(embedding_generation_provider, [None, []])[0],\n model_name=embedding_generation_model,\n )\n\n # Create the collection\n return await database.create_collection(\n name=new_collection_name,\n keyspace=keyspace,\n dimension=dimension,\n service=vectorize_options,\n )\n\n @classmethod\n def get_database_list_static(cls, token: str, environment: str | None = None):\n client = DataAPIClient(token=token, environment=environment)\n\n # Get the admin object\n admin_client = client.get_admin(token=token)\n\n # Get the list of databases\n db_list = list(admin_client.list_databases())\n\n # Set the environment properly\n env_string = \"\"\n if environment and environment != \"prod\":\n env_string = f\"-{environment}\"\n\n # Generate the api endpoint for each database\n db_info_dict = {}\n for db in db_list:\n try:\n # Get the API endpoint for the database\n api_endpoint = f\"https://{db.info.id}-{db.info.region}.apps.astra{env_string}.datastax.com\"\n\n # Get the number of collections\n try:\n num_collections = len(\n list(\n client.get_database(\n api_endpoint=api_endpoint, token=token, keyspace=db.info.keyspace\n 
).list_collection_names(keyspace=db.info.keyspace)\n )\n )\n except Exception: # noqa: BLE001\n num_collections = 0\n if db.status != \"PENDING\":\n continue\n\n # Add the database to the dictionary\n db_info_dict[db.info.name] = {\n \"api_endpoint\": api_endpoint,\n \"collections\": num_collections,\n \"status\": db.status if db.status != \"ACTIVE\" else None,\n }\n except Exception: # noqa: BLE001, S110\n pass\n\n return db_info_dict\n\n def get_database_list(self):\n return self.get_database_list_static(token=self.token, environment=self.environment)\n\n @classmethod\n def get_api_endpoint_static(\n cls,\n token: str,\n environment: str | None = None,\n api_endpoint: str | None = None,\n database_name: str | None = None,\n ):\n # If the api_endpoint is set, return it\n if api_endpoint:\n return api_endpoint\n\n # Check if the database_name is like a url\n if database_name and database_name.startswith(\"https://\"):\n return database_name\n\n # If the database is not set, nothing we can do.\n if not database_name:\n return None\n\n # Grab the database object\n db = cls.get_database_list_static(token=token, environment=environment).get(database_name)\n if not db:\n return None\n\n # Otherwise, get the URL from the database list\n return db.get(\"api_endpoint\")\n\n def get_api_endpoint(self):\n return self.get_api_endpoint_static(\n token=self.token,\n environment=self.environment,\n api_endpoint=self.api_endpoint,\n database_name=self.database_name,\n )\n\n def get_keyspace(self):\n keyspace = self.keyspace\n\n if keyspace:\n return keyspace.strip()\n\n return None\n\n def get_database_object(self, api_endpoint: str | None = None):\n try:\n client = DataAPIClient(token=self.token, environment=self.environment)\n\n return client.get_database(\n api_endpoint=api_endpoint or self.get_api_endpoint(),\n token=self.token,\n keyspace=self.get_keyspace(),\n )\n except Exception as e:\n msg = f\"Error fetching database object: {e}\"\n raise ValueError(msg) from e\n\n def collection_data(self, collection_name: str, database: Database | None = None):\n try:\n if not database:\n client = DataAPIClient(token=self.token, environment=self.environment)\n\n database = client.get_database(\n api_endpoint=self.get_api_endpoint(),\n token=self.token,\n keyspace=self.get_keyspace(),\n )\n\n collection = database.get_collection(collection_name, keyspace=self.get_keyspace())\n\n return collection.estimated_document_count()\n except Exception as e: # noqa: BLE001\n self.log(f\"Error checking collection data: {e}\")\n\n return None\n\n def _initialize_database_options(self):\n try:\n return [\n {\n \"name\": name,\n \"status\": info[\"status\"],\n \"collections\": info[\"collections\"],\n \"api_endpoint\": info[\"api_endpoint\"],\n \"icon\": \"data\",\n }\n for name, info in self.get_database_list().items()\n ]\n except Exception as e:\n msg = f\"Error fetching database options: {e}\"\n raise ValueError(msg) from e\n\n @classmethod\n def get_provider_icon(cls, collection: CollectionDescriptor | None = None, provider_name: str | None = None) -> str:\n # Get the provider name from the collection\n provider_name = provider_name or (\n collection.options.vector.service.provider\n if collection and collection.options and collection.options.vector and collection.options.vector.service\n else None\n )\n\n # If there is no provider, use the vector store icon\n if not provider_name or provider_name == \"bring your own\":\n return \"vectorstores\"\n\n # Special case for certain models\n # TODO: Add more icons\n if 
provider_name == \"nvidia\":\n return \"NVIDIA\"\n if provider_name == \"openai\":\n return \"OpenAI\"\n\n # Title case on the provider for the icon if no special case\n return provider_name.title()\n\n def _initialize_collection_options(self, api_endpoint: str | None = None):\n # Nothing to generate if we don't have an API endpoint yet\n api_endpoint = api_endpoint or self.get_api_endpoint()\n if not api_endpoint:\n return []\n\n # Retrieve the database object\n database = self.get_database_object(api_endpoint=api_endpoint)\n\n # Get the list of collections\n collection_list = list(database.list_collections(keyspace=self.get_keyspace()))\n\n # Return the list of collections and metadata associated\n return [\n {\n \"name\": col.name,\n \"records\": self.collection_data(collection_name=col.name, database=database),\n \"provider\": (\n col.options.vector.service.provider if col.options.vector and col.options.vector.service else None\n ),\n \"icon\": self.get_provider_icon(collection=col),\n \"model\": (\n col.options.vector.service.model_name if col.options.vector and col.options.vector.service else None\n ),\n }\n for col in collection_list\n ]\n\n def reset_provider_options(self, build_config: dict):\n # Get the list of vectorize providers\n vectorize_providers = self.get_vectorize_providers(\n token=self.token,\n environment=self.environment,\n api_endpoint=build_config[\"api_endpoint\"][\"value\"],\n )\n\n # Append a special case for Bring your own\n vectorize_providers[\"Bring your own\"] = [None, [\"Bring your own\"]]\n\n # If the collection is set, allow user to see embedding options\n build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_provider\"\n ][\"options\"] = [\"Bring your own\", \"Nvidia\", *[key for key in vectorize_providers if key != \"Nvidia\"]]\n\n # For all not Bring your own or Nvidia providers, add metadata saying configure in Astra DB Portal\n provider_options = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_provider\"\n ][\"options\"]\n\n # Go over each possible provider and add metadata to configure in Astra DB Portal\n for provider in provider_options:\n # Skip Bring your own and Nvidia, automatically configured\n if provider in [\"Bring your own\", \"Nvidia\"]:\n build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_provider\"\n ][\"options_metadata\"].append({\"icon\": self.get_provider_icon(provider_name=provider.lower())})\n continue\n\n # Add metadata to configure in Astra DB Portal\n build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_provider\"\n ][\"options_metadata\"].append({\" \": \"Configure in Astra DB Portal\"})\n\n # And allow the user to see the models based on a selected provider\n embedding_provider = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_provider\"\n ][\"value\"]\n\n # Set the options for the embedding model based on the provider\n build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_model\"\n ][\"options\"] = vectorize_providers.get(embedding_provider, [[], []])[1]\n\n return build_config\n\n def reset_collection_list(self, build_config: dict):\n # Get the list of options we have based on the token 
provided\n collection_options = self._initialize_collection_options(api_endpoint=build_config[\"api_endpoint\"][\"value\"])\n\n # If we retrieved options based on the token, show the dropdown\n build_config[\"collection_name\"][\"options\"] = [col[\"name\"] for col in collection_options]\n build_config[\"collection_name\"][\"options_metadata\"] = [\n {k: v for k, v in col.items() if k not in [\"name\"]} for col in collection_options\n ]\n\n # Reset the selected collection\n if build_config[\"collection_name\"][\"value\"] not in build_config[\"collection_name\"][\"options\"]:\n build_config[\"collection_name\"][\"value\"] = \"\"\n\n # If we have a database, collection name should not be advanced\n build_config[\"collection_name\"][\"advanced\"] = not build_config[\"database_name\"][\"value\"]\n\n return build_config\n\n def reset_database_list(self, build_config: dict):\n # Get the list of options we have based on the token provided\n database_options = self._initialize_database_options()\n\n # If we retrieved options based on the token, show the dropdown\n build_config[\"database_name\"][\"options\"] = [db[\"name\"] for db in database_options]\n build_config[\"database_name\"][\"options_metadata\"] = [\n {k: v for k, v in db.items() if k not in [\"name\"]} for db in database_options\n ]\n\n # Reset the selected database\n if build_config[\"database_name\"][\"value\"] not in build_config[\"database_name\"][\"options\"]:\n build_config[\"database_name\"][\"value\"] = \"\"\n build_config[\"api_endpoint\"][\"value\"] = \"\"\n build_config[\"collection_name\"][\"advanced\"] = True\n\n # If we have a token, database name should not be advanced\n build_config[\"database_name\"][\"advanced\"] = not build_config[\"token\"][\"value\"]\n\n return build_config\n\n def reset_build_config(self, build_config: dict):\n # Reset the list of databases we have based on the token provided\n build_config[\"database_name\"][\"options\"] = []\n build_config[\"database_name\"][\"options_metadata\"] = []\n build_config[\"database_name\"][\"value\"] = \"\"\n build_config[\"database_name\"][\"advanced\"] = True\n build_config[\"api_endpoint\"][\"value\"] = \"\"\n\n # Reset the list of collections and metadata associated\n build_config[\"collection_name\"][\"options\"] = []\n build_config[\"collection_name\"][\"options_metadata\"] = []\n build_config[\"collection_name\"][\"value\"] = \"\"\n build_config[\"collection_name\"][\"advanced\"] = True\n\n return build_config\n\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n # Callback for database creation\n if field_name == \"database_name\" and isinstance(field_value, dict) and \"new_database_name\" in field_value:\n try:\n await self.create_database_api(\n new_database_name=field_value[\"new_database_name\"],\n token=self.token,\n keyspace=self.get_keyspace(),\n environment=self.environment,\n cloud_provider=field_value[\"cloud_provider\"],\n region=field_value[\"region\"],\n )\n except Exception as e:\n msg = f\"Error creating database: {e}\"\n raise ValueError(msg) from e\n\n # Add the new database to the list of options\n build_config[\"database_name\"][\"options\"] = build_config[\"database_name\"][\"options\"] + [\n field_value[\"new_database_name\"]\n ]\n build_config[\"database_name\"][\"options_metadata\"] = build_config[\"database_name\"][\"options_metadata\"] + [\n {\"status\": \"PENDING\"}\n ]\n\n return self.reset_collection_list(build_config)\n\n # This is the callback required to update the list 
of regions for a cloud provider\n if field_name == \"database_name\" and isinstance(field_value, dict) and \"new_database_name\" not in field_value:\n cloud_provider = field_value[\"cloud_provider\"]\n build_config[\"database_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\"region\"][\n \"options\"\n ] = self.map_cloud_providers()[cloud_provider][\"regions\"]\n\n return build_config\n\n # Callback for the creation of collections\n if field_name == \"collection_name\" and isinstance(field_value, dict) and \"new_collection_name\" in field_value:\n try:\n # Get the dimension if its a BYO provider\n dimension = (\n field_value[\"dimension\"]\n if field_value[\"embedding_generation_provider\"] == \"Bring your own\"\n else None\n )\n\n # Create the collection\n await self.create_collection_api(\n new_collection_name=field_value[\"new_collection_name\"],\n token=self.token,\n api_endpoint=build_config[\"api_endpoint\"][\"value\"],\n environment=self.environment,\n keyspace=self.get_keyspace(),\n dimension=dimension,\n embedding_generation_provider=field_value[\"embedding_generation_provider\"],\n embedding_generation_model=field_value[\"embedding_generation_model\"],\n )\n except Exception as e:\n msg = f\"Error creating collection: {e}\"\n raise ValueError(msg) from e\n\n # Add the new collection to the list of options\n build_config[\"collection_name\"][\"value\"] = field_value[\"new_collection_name\"]\n build_config[\"collection_name\"][\"options\"].append(field_value[\"new_collection_name\"])\n\n # Get the provider and model for the new collection\n generation_provider = field_value[\"embedding_generation_provider\"]\n provider = generation_provider if generation_provider != \"Bring your own\" else None\n generation_model = field_value[\"embedding_generation_model\"]\n model = generation_model if generation_model and generation_model != \"Bring your own\" else None\n\n # Set the embedding choice\n build_config[\"embedding_choice\"][\"value\"] = \"Astra Vectorize\" if provider else \"Embedding Model\"\n build_config[\"embedding_model\"][\"advanced\"] = bool(provider)\n\n # Add the new collection to the list of options\n icon = \"NVIDIA\" if provider == \"Nvidia\" else \"vectorstores\"\n build_config[\"collection_name\"][\"options_metadata\"] = build_config[\"collection_name\"][\n \"options_metadata\"\n ] + [{\"records\": 0, \"provider\": provider, \"icon\": icon, \"model\": model}]\n\n return build_config\n\n # Callback to update the model list based on the embedding provider\n if (\n field_name == \"collection_name\"\n and isinstance(field_value, dict)\n and \"new_collection_name\" not in field_value\n ):\n return self.reset_provider_options(build_config)\n\n # When the component first executes, this is the update refresh call\n first_run = field_name == \"collection_name\" and not field_value and not build_config[\"database_name\"][\"options\"]\n\n # If the token has not been provided, simply return the empty build config\n if not self.token:\n return self.reset_build_config(build_config)\n\n # If this is the first execution of the component, reset and build database list\n if first_run or field_name in [\"token\", \"environment\"]:\n return self.reset_database_list(build_config)\n\n # Refresh the collection name options\n if field_name == \"database_name\" and not isinstance(field_value, dict):\n # If missing, refresh the database options\n if field_value not in build_config[\"database_name\"][\"options\"]:\n build_config = await 
self.update_build_config(build_config, field_value=self.token, field_name=\"token\")\n build_config[\"database_name\"][\"value\"] = \"\"\n else:\n # Find the position of the selected database to align with metadata\n index_of_name = build_config[\"database_name\"][\"options\"].index(field_value)\n\n # Initializing database condition\n pending = build_config[\"database_name\"][\"options_metadata\"][index_of_name][\"status\"] == \"PENDING\"\n if pending:\n return self.update_build_config(build_config, field_value=self.token, field_name=\"token\")\n\n # Set the API endpoint based on the selected database\n build_config[\"api_endpoint\"][\"value\"] = build_config[\"database_name\"][\"options_metadata\"][\n index_of_name\n ][\"api_endpoint\"]\n\n # Reset the provider options\n build_config = self.reset_provider_options(build_config)\n\n # Reset the list of collections we have based on the token provided\n return self.reset_collection_list(build_config)\n\n # Hide embedding model option if opriona_metadata provider is not null\n if field_name == \"collection_name\" and not isinstance(field_value, dict):\n # Assume we will be autodetecting the collection:\n build_config[\"autodetect_collection\"][\"value\"] = True\n\n # Reload the collection list\n build_config = self.reset_collection_list(build_config)\n\n # Set the options for collection name to be the field value if its a new collection\n if field_value and field_value not in build_config[\"collection_name\"][\"options\"]:\n # Add the new collection to the list of options\n build_config[\"collection_name\"][\"options\"].append(field_value)\n build_config[\"collection_name\"][\"options_metadata\"].append(\n {\"records\": 0, \"provider\": None, \"icon\": \"\", \"model\": None}\n )\n\n # Ensure that autodetect collection is set to False, since its a new collection\n build_config[\"autodetect_collection\"][\"value\"] = False\n\n # If nothing is selected, can't detect provider - return\n if not field_value:\n return build_config\n\n # Find the position of the selected collection to align with metadata\n index_of_name = build_config[\"collection_name\"][\"options\"].index(field_value)\n value_of_provider = build_config[\"collection_name\"][\"options_metadata\"][index_of_name][\"provider\"]\n\n # If we were able to determine the Vectorize provider, set it accordingly\n if value_of_provider:\n build_config[\"embedding_model\"][\"advanced\"] = True\n build_config[\"embedding_choice\"][\"value\"] = \"Astra Vectorize\"\n else:\n build_config[\"embedding_model\"][\"advanced\"] = False\n build_config[\"embedding_choice\"][\"value\"] = \"Embedding Model\"\n\n return build_config\n\n return build_config\n\n @check_cached_vector_store\n def build_vector_store(self):\n try:\n from langchain_astradb import AstraDBVectorStore\n except ImportError as e:\n msg = (\n \"Could not import langchain Astra DB integration package. 
\"\n \"Please install it with `pip install langchain-astradb`.\"\n )\n raise ImportError(msg) from e\n\n # Get the embedding model and additional params\n embedding_params = (\n {\"embedding\": self.embedding_model}\n if self.embedding_model and self.embedding_choice == \"Embedding Model\"\n else {}\n )\n\n # Get the additional parameters\n additional_params = self.astradb_vectorstore_kwargs or {}\n\n # Get Langflow version and platform information\n __version__ = get_version_info()[\"version\"]\n langflow_prefix = \"\"\n # if os.getenv(\"AWS_EXECUTION_ENV\") == \"AWS_ECS_FARGATE\": # TODO: More precise way of detecting\n # langflow_prefix = \"ds-\"\n\n # Get the database object\n database = self.get_database_object()\n autodetect = self.collection_name in database.list_collection_names() and self.autodetect_collection\n\n # Bundle up the auto-detect parameters\n autodetect_params = {\n \"autodetect_collection\": autodetect,\n \"content_field\": (\n self.content_field\n if self.content_field and embedding_params\n else (\n \"page_content\"\n if embedding_params\n and self.collection_data(collection_name=self.collection_name, database=database) == 0\n else None\n )\n ),\n \"ignore_invalid_documents\": self.ignore_invalid_documents,\n }\n\n # Attempt to build the Vector Store object\n try:\n vector_store = AstraDBVectorStore(\n # Astra DB Authentication Parameters\n token=self.token,\n api_endpoint=database.api_endpoint,\n namespace=database.keyspace,\n collection_name=self.collection_name,\n environment=self.environment,\n # Astra DB Usage Tracking Parameters\n ext_callers=[(f\"{langflow_prefix}langflow\", __version__)],\n # Astra DB Vector Store Parameters\n **autodetect_params,\n **embedding_params,\n **additional_params,\n )\n except Exception as e:\n msg = f\"Error initializing AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n\n # Add documents to the vector store\n self._add_documents_to_vector_store(vector_store)\n\n return vector_store\n\n def _add_documents_to_vector_store(self, vector_store) -> None:\n documents = []\n for _input in self.ingest_data or []:\n if isinstance(_input, Data):\n documents.append(_input.to_lc_document())\n else:\n msg = \"Vector Store Inputs must be Data objects.\"\n raise TypeError(msg)\n\n if documents and self.deletion_field:\n self.log(f\"Deleting documents where {self.deletion_field}\")\n try:\n database = self.get_database_object()\n collection = database.get_collection(self.collection_name, keyspace=database.keyspace)\n delete_values = list({doc.metadata[self.deletion_field] for doc in documents})\n self.log(f\"Deleting documents where {self.deletion_field} matches {delete_values}.\")\n collection.delete_many({f\"metadata.{self.deletion_field}\": {\"$in\": delete_values}})\n except Exception as e:\n msg = f\"Error deleting documents from AstraDBVectorStore based on '{self.deletion_field}': {e}\"\n raise ValueError(msg) from e\n\n if documents:\n self.log(f\"Adding {len(documents)} documents to the Vector Store.\")\n try:\n vector_store.add_documents(documents)\n except Exception as e:\n msg = f\"Error adding documents to AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n else:\n self.log(\"No documents to add to the Vector Store.\")\n\n def _map_search_type(self) -> str:\n search_type_mapping = {\n \"Similarity with score threshold\": \"similarity_score_threshold\",\n \"MMR (Max Marginal Relevance)\": \"mmr\",\n }\n\n return search_type_mapping.get(self.search_type, \"similarity\")\n\n def _build_search_args(self):\n query = 
self.search_query if isinstance(self.search_query, str) and self.search_query.strip() else None\n\n if query:\n args = {\n \"query\": query,\n \"search_type\": self._map_search_type(),\n \"k\": self.number_of_results,\n \"score_threshold\": self.search_score_threshold,\n }\n elif self.advanced_search_filter:\n args = {\n \"n\": self.number_of_results,\n }\n else:\n return {}\n\n filter_arg = self.advanced_search_filter or {}\n if filter_arg:\n args[\"filter\"] = filter_arg\n\n return args\n\n def search_documents(self, vector_store=None) -> list[Data]:\n vector_store = vector_store or self.build_vector_store()\n\n self.log(f\"Search input: {self.search_query}\")\n self.log(f\"Search type: {self.search_type}\")\n self.log(f\"Number of results: {self.number_of_results}\")\n\n try:\n search_args = self._build_search_args()\n except Exception as e:\n msg = f\"Error in AstraDBVectorStore._build_search_args: {e}\"\n raise ValueError(msg) from e\n\n if not search_args:\n self.log(\"No search input or filters provided. Skipping search.\")\n return []\n\n docs = []\n search_method = \"search\" if \"query\" in search_args else \"metadata_search\"\n\n try:\n self.log(f\"Calling vector_store.{search_method} with args: {search_args}\")\n docs = getattr(vector_store, search_method)(**search_args)\n except Exception as e:\n msg = f\"Error performing {search_method} in AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n\n self.log(f\"Retrieved documents: {len(docs)}\")\n\n data = docs_to_data(docs)\n self.log(f\"Converted documents to data: {len(data)}\")\n self.status = data\n\n return data\n\n def get_retriever_kwargs(self):\n search_args = self._build_search_args()\n\n return {\n \"search_type\": self._map_search_type(),\n \"search_kwargs\": search_args,\n }\n" - }, - "collection_name": { - "_input_type": "DropdownInput", - "advanced": true, - "combobox": true, - "dialog_inputs": { - "fields": { - "data": { - "node": { - "description": "", - "display_name": "Create new collection", - "field_order": [ - "new_collection_name", - "embedding_generation_provider", - "embedding_generation_model", - "dimension" - ], - "name": "create_collection", - "template": { - "dimension": { - "_input_type": "IntInput", - "advanced": false, - "display_name": "Dimensions (Required only for `Bring your own`)", - "dynamic": false, - "info": "Dimensions of the embeddings to generate.", - "list": false, - "list_add_label": "Add More", - "name": "dimension", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "int", - "value": 1024 - }, - "embedding_generation_model": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Embedding model", - "dynamic": false, - "info": "Model to use for generating embeddings.", - "name": "embedding_generation_model", - "options": [], - "options_metadata": [], - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, - "embedding_generation_provider": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Embedding generation method", - "dynamic": false, - "info": "Provider to use for generating embeddings.", - "name": "embedding_generation_provider", - "options": [ - "Bring your own", - "Nvidia" - ], - "options_metadata": [], - "placeholder": "", - 
"real_time_refresh": true, - "required": true, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, - "new_collection_name": { - "_input_type": "StrInput", - "advanced": false, - "display_name": "Name", - "dynamic": false, - "info": "Name of the new collection to create in Astra DB.", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "new_collection_name", - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "" - } - } - } - } - }, - "functionality": "create" - }, - "display_name": "Collection", - "dynamic": false, - "info": "The name of the collection within Astra DB where the vectors will be stored.", - "name": "collection_name", - "options": [], - "options_metadata": [], - "placeholder": "", - "real_time_refresh": true, - "refresh_button": true, - "required": true, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, - "content_field": { - "_input_type": "StrInput", - "advanced": true, - "display_name": "Content Field", - "dynamic": false, - "info": "Field to use as the text content field for the vector store.", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "content_field", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, - "database_name": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": true, - "dialog_inputs": { - "fields": { - "data": { - "node": { - "description": "", - "display_name": "Create new database", - "field_order": [ - "new_database_name", - "cloud_provider", - "region" - ], - "name": "create_database", - "template": { - "cloud_provider": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Cloud provider", - "dynamic": false, - "info": "Cloud provider for the new database.", - "name": "cloud_provider", - "options": [ - "Amazon Web Services", - "Google Cloud Platform", - "Microsoft Azure" - ], - "options_metadata": [], - "placeholder": "", - "real_time_refresh": true, - "required": true, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, - "new_database_name": { - "_input_type": "StrInput", - "advanced": false, - "display_name": "Name", - "dynamic": false, - "info": "Name of the new database to create in Astra DB.", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "new_database_name", - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, - "region": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Region", - "dynamic": false, - "info": "Region for the new database.", - "name": "region", - "options": [], - "options_metadata": [], - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "" - } - } - } - } - }, - "functionality": "create" - }, - "display_name": "Database", - "dynamic": false, - "info": "The Database name for the Astra DB instance.", - "name": 
"database_name", - "options": [], - "options_metadata": [], - "placeholder": "", - "real_time_refresh": true, - "refresh_button": true, - "required": true, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, - "deletion_field": { - "_input_type": "StrInput", - "advanced": true, - "display_name": "Deletion Based On Field", - "dynamic": false, - "info": "When this parameter is provided, documents in the target collection with metadata field values matching the input metadata field value will be deleted before new data is loaded.", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "deletion_field", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, - "embedding_choice": { - "_input_type": "DropdownInput", - "advanced": true, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Embedding Model or Astra Vectorize", - "dynamic": false, - "info": "Choose an embedding model or use Astra Vectorize.", - "name": "embedding_choice", - "options": [ - "Embedding Model", - "Astra Vectorize" - ], - "options_metadata": [], - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "Embedding Model" - }, - "embedding_model": { - "_input_type": "HandleInput", - "advanced": false, - "display_name": "Embedding Model", - "dynamic": false, - "info": "Specify the Embedding Model. Not required for Astra Vectorize collections.", - "input_types": [ - "Embeddings" - ], - "list": false, - "list_add_label": "Add More", - "name": "embedding_model", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "trace_as_metadata": true, - "type": "other", - "value": "" - }, - "environment": { - "_input_type": "StrInput", - "advanced": true, - "display_name": "Environment", - "dynamic": false, - "info": "The environment for the Astra DB API Endpoint.", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "environment", - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, - "ignore_invalid_documents": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Ignore Invalid Documents", - "dynamic": false, - "info": "Boolean flag to determine whether to ignore invalid documents at runtime.", - "list": false, - "list_add_label": "Add More", - "name": "ignore_invalid_documents", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "bool", - "value": false - }, - "ingest_data": { - "_input_type": "DataInput", - "advanced": false, - "display_name": "Ingest Data", - "dynamic": false, - "info": "", - "input_types": [ - "Data" - ], - "list": false, - "list_add_label": "Add More", - "name": "ingest_data", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "other", - "value": "" - }, - "keyspace": { - "_input_type": "StrInput", - "advanced": true, - "display_name": "Keyspace", - "dynamic": false, - "info": "Optional keyspace within Astra DB to use for the collection.", 
- "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "keyspace", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, - "number_of_results": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "Number of Search Results", - "dynamic": false, - "info": "Number of search results to return.", - "list": false, - "list_add_label": "Add More", - "name": "number_of_results", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "int", - "value": 4 - }, - "search_query": { - "_input_type": "MultilineInput", - "advanced": false, - "display_name": "Search Query", - "dynamic": false, - "info": "", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "multiline": true, - "name": "search_query", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": true, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, - "search_score_threshold": { - "_input_type": "FloatInput", - "advanced": true, - "display_name": "Search Score Threshold", - "dynamic": false, - "info": "Minimum similarity score threshold for search results. (when using 'Similarity with score threshold')", - "list": false, - "list_add_label": "Add More", - "name": "search_score_threshold", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "float", - "value": 0 - }, - "search_type": { - "_input_type": "DropdownInput", - "advanced": true, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Search Type", - "dynamic": false, - "info": "Search type to use", - "name": "search_type", - "options": [ - "Similarity", - "Similarity with score threshold", - "MMR (Max Marginal Relevance)" - ], - "options_metadata": [], - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "Similarity" - }, - "token": { - "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "Astra DB Application Token", - "dynamic": false, - "info": "Authentication token for accessing Astra DB.", - "input_types": [], - "load_from_db": true, - "name": "token", - "password": true, - "placeholder": "", - "real_time_refresh": true, - "required": true, - "show": true, - "title_case": false, - "type": "str", - "value": "" - } - }, - "tool_mode": false - }, - "showNode": true, - "type": "AstraDB" - }, - "dragging": false, - "id": "AstraDB-UZ35f", - "measured": { - "height": 594, - "width": 360 - }, - "position": { - "x": 2066.56373433188, - "y": 1488.8926065144908 - }, - "selected": false, - "type": "genericNode" - }, - { - "data": { - "id": "AstraDB-Etytu", + "id": "AstraDB-XU7Xo", "node": { "base_classes": [ "Data", @@ -4066,9 +3411,9 @@ "dimension": { "_input_type": "IntInput", "advanced": false, - "display_name": "Dimensions (Required only for `Bring your own`)", + "display_name": "Dimensions", "dynamic": false, - "info": "Dimensions of the embeddings to generate.", + "info": "Dimension of the embeddings to generate.", "list": false, "list_add_label": "Add More", "name": "dimension", @@ -4090,7 +3435,10 @@ "dynamic": false, "info": "Model to use for generating embeddings.", "name": 
"embedding_generation_model", - "options": [], + "options": [ + "Bring your own", + "NV-Embed-QA" + ], "options_metadata": [], "placeholder": "", "required": true, @@ -4256,7 +3604,11 @@ "dynamic": false, "info": "Region for the new database.", "name": "region", - "options": [], + "options": [ + "us-east-2", + "ap-south-1", + "eu-west-1" + ], "options_metadata": [], "placeholder": "", "required": true, @@ -4530,7 +3882,7 @@ "dynamic": false, "info": "Authentication token for accessing Astra DB.", "input_types": [], - "load_from_db": true, + "load_from_db": false, "name": "token", "password": true, "placeholder": "", @@ -4548,28 +3900,714 @@ "type": "AstraDB" }, "dragging": false, - "id": "AstraDB-Etytu", + "id": "AstraDB-XU7Xo", "measured": { "height": 594, "width": 360 }, "position": { - "x": 1220.0616335680065, - "y": 624.1607892883594 + "x": 1213.4353517134307, + "y": 631.4125346711122 + }, + "selected": false, + "type": "genericNode" + }, + { + "data": { + "id": "AstraDB-fyg6q", + "node": { + "base_classes": [ + "Data", + "DataFrame" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Ingest and search documents in Astra DB", + "display_name": "Astra DB", + "documentation": "https://docs.datastax.com/en/langflow/astra-components.html", + "edited": false, + "field_order": [ + "token", + "environment", + "database_name", + "api_endpoint", + "collection_name", + "keyspace", + "embedding_choice", + "embedding_model", + "ingest_data", + "search_query", + "number_of_results", + "search_type", + "search_score_threshold", + "advanced_search_filter", + "autodetect_collection", + "content_field", + "deletion_field", + "ignore_invalid_documents", + "astradb_vectorstore_kwargs" + ], + "frozen": false, + "icon": "AstraDB", + "legacy": false, + "metadata": {}, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Search Results", + "method": "search_documents", + "name": "search_results", + "required_inputs": [ + "collection_name", + "database_name", + "token" + ], + "selected": "Data", + "tool_mode": true, + "types": [ + "Data" + ], + "value": "__UNDEFINED__" + }, + { + "allows_loop": false, + "cache": true, + "display_name": "DataFrame", + "method": "as_dataframe", + "name": "dataframe", + "required_inputs": [], + "selected": "DataFrame", + "tool_mode": true, + "types": [ + "DataFrame" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_type": "Component", + "advanced_search_filter": { + "_input_type": "NestedDictInput", + "advanced": true, + "display_name": "Search Metadata Filter", + "dynamic": false, + "info": "Optional dictionary of filters to apply to the search query.", + "list": false, + "list_add_label": "Add More", + "name": "advanced_search_filter", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "type": "NestedDict", + "value": {} + }, + "api_endpoint": { + "_input_type": "StrInput", + "advanced": true, + "display_name": "Astra DB API Endpoint", + "dynamic": false, + "info": "The API Endpoint for the Astra DB instance. 
Supercedes database selection.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "api_endpoint", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "str", + "value": "" + }, + "astradb_vectorstore_kwargs": { + "_input_type": "NestedDictInput", + "advanced": true, + "display_name": "AstraDBVectorStore Parameters", + "dynamic": false, + "info": "Optional dictionary of additional parameters for the AstraDBVectorStore.", + "list": false, + "list_add_label": "Add More", + "name": "astradb_vectorstore_kwargs", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "type": "NestedDict", + "value": {} + }, + "autodetect_collection": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Autodetect Collection", + "dynamic": false, + "info": "Boolean flag to determine whether to autodetect the collection.", + "list": false, + "list_add_label": "Add More", + "name": "autodetect_collection", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "bool", + "value": true + }, + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from collections import defaultdict\nfrom dataclasses import asdict, dataclass, field\n\nfrom astrapy import AstraDBAdmin, DataAPIClient, Database\nfrom astrapy.info import CollectionDescriptor\nfrom langchain_astradb import AstraDBVectorStore, CollectionVectorServiceOptions\n\nfrom langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom langflow.helpers import docs_to_data\nfrom langflow.inputs import FloatInput, NestedDictInput\nfrom langflow.io import (\n BoolInput,\n DropdownInput,\n HandleInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema import Data\nfrom langflow.utils.version import get_version_info\n\n\nclass AstraDBVectorStoreComponent(LCVectorStoreComponent):\n display_name: str = \"Astra DB\"\n description: str = \"Ingest and search documents in Astra DB\"\n documentation: str = \"https://docs.datastax.com/en/langflow/astra-components.html\"\n name = \"AstraDB\"\n icon: str = \"AstraDB\"\n\n _cached_vector_store: AstraDBVectorStore | None = None\n\n @dataclass\n class NewDatabaseInput:\n functionality: str = \"create\"\n fields: dict[str, dict] = field(\n default_factory=lambda: {\n \"data\": {\n \"node\": {\n \"name\": \"create_database\",\n \"description\": \"\",\n \"display_name\": \"Create new database\",\n \"field_order\": [\"new_database_name\", \"cloud_provider\", \"region\"],\n \"template\": {\n \"new_database_name\": StrInput(\n name=\"new_database_name\",\n display_name=\"Name\",\n info=\"Name of the new database to create in Astra DB.\",\n required=True,\n ),\n \"cloud_provider\": DropdownInput(\n name=\"cloud_provider\",\n display_name=\"Cloud provider\",\n info=\"Cloud provider for the new database.\",\n options=[\"Amazon Web Services\", \"Google Cloud Platform\", \"Microsoft Azure\"],\n required=True,\n real_time_refresh=True,\n ),\n \"region\": DropdownInput(\n name=\"region\",\n display_name=\"Region\",\n 
info=\"Region for the new database.\",\n options=[],\n required=True,\n ),\n },\n },\n }\n }\n )\n\n @dataclass\n class NewCollectionInput:\n functionality: str = \"create\"\n fields: dict[str, dict] = field(\n default_factory=lambda: {\n \"data\": {\n \"node\": {\n \"name\": \"create_collection\",\n \"description\": \"\",\n \"display_name\": \"Create new collection\",\n \"field_order\": [\n \"new_collection_name\",\n \"embedding_generation_provider\",\n \"embedding_generation_model\",\n \"dimension\",\n ],\n \"template\": {\n \"new_collection_name\": StrInput(\n name=\"new_collection_name\",\n display_name=\"Name\",\n info=\"Name of the new collection to create in Astra DB.\",\n required=True,\n ),\n \"embedding_generation_provider\": DropdownInput(\n name=\"embedding_generation_provider\",\n display_name=\"Embedding generation method\",\n info=\"Provider to use for generating embeddings.\",\n real_time_refresh=True,\n required=True,\n options=[\"Bring your own\", \"Nvidia\"],\n ),\n \"embedding_generation_model\": DropdownInput(\n name=\"embedding_generation_model\",\n display_name=\"Embedding model\",\n info=\"Model to use for generating embeddings.\",\n required=True,\n options=[],\n ),\n \"dimension\": IntInput(\n name=\"dimension\",\n display_name=\"Dimensions (Required only for `Bring your own`)\",\n info=\"Dimensions of the embeddings to generate.\",\n required=False,\n value=1024,\n ),\n },\n },\n }\n }\n )\n\n inputs = [\n SecretStrInput(\n name=\"token\",\n display_name=\"Astra DB Application Token\",\n info=\"Authentication token for accessing Astra DB.\",\n value=\"ASTRA_DB_APPLICATION_TOKEN\",\n required=True,\n real_time_refresh=True,\n input_types=[],\n ),\n StrInput(\n name=\"environment\",\n display_name=\"Environment\",\n info=\"The environment for the Astra DB API Endpoint.\",\n advanced=True,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"database_name\",\n display_name=\"Database\",\n info=\"The Database name for the Astra DB instance.\",\n required=True,\n refresh_button=True,\n real_time_refresh=True,\n dialog_inputs=asdict(NewDatabaseInput()),\n combobox=True,\n ),\n StrInput(\n name=\"api_endpoint\",\n display_name=\"Astra DB API Endpoint\",\n info=\"The API Endpoint for the Astra DB instance. Supercedes database selection.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"collection_name\",\n display_name=\"Collection\",\n info=\"The name of the collection within Astra DB where the vectors will be stored.\",\n required=True,\n refresh_button=True,\n real_time_refresh=True,\n dialog_inputs=asdict(NewCollectionInput()),\n combobox=True,\n advanced=True,\n ),\n StrInput(\n name=\"keyspace\",\n display_name=\"Keyspace\",\n info=\"Optional keyspace within Astra DB to use for the collection.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"embedding_choice\",\n display_name=\"Embedding Model or Astra Vectorize\",\n info=\"Choose an embedding model or use Astra Vectorize.\",\n options=[\"Embedding Model\", \"Astra Vectorize\"],\n value=\"Embedding Model\",\n advanced=True,\n real_time_refresh=True,\n ),\n HandleInput(\n name=\"embedding_model\",\n display_name=\"Embedding Model\",\n input_types=[\"Embeddings\"],\n info=\"Specify the Embedding Model. 
Not required for Astra Vectorize collections.\",\n required=False,\n ),\n *LCVectorStoreComponent.inputs,\n IntInput(\n name=\"number_of_results\",\n display_name=\"Number of Search Results\",\n info=\"Number of search results to return.\",\n advanced=True,\n value=4,\n ),\n DropdownInput(\n name=\"search_type\",\n display_name=\"Search Type\",\n info=\"Search type to use\",\n options=[\"Similarity\", \"Similarity with score threshold\", \"MMR (Max Marginal Relevance)\"],\n value=\"Similarity\",\n advanced=True,\n ),\n FloatInput(\n name=\"search_score_threshold\",\n display_name=\"Search Score Threshold\",\n info=\"Minimum similarity score threshold for search results. \"\n \"(when using 'Similarity with score threshold')\",\n value=0,\n advanced=True,\n ),\n NestedDictInput(\n name=\"advanced_search_filter\",\n display_name=\"Search Metadata Filter\",\n info=\"Optional dictionary of filters to apply to the search query.\",\n advanced=True,\n ),\n BoolInput(\n name=\"autodetect_collection\",\n display_name=\"Autodetect Collection\",\n info=\"Boolean flag to determine whether to autodetect the collection.\",\n advanced=True,\n value=True,\n ),\n StrInput(\n name=\"content_field\",\n display_name=\"Content Field\",\n info=\"Field to use as the text content field for the vector store.\",\n advanced=True,\n ),\n StrInput(\n name=\"deletion_field\",\n display_name=\"Deletion Based On Field\",\n info=\"When this parameter is provided, documents in the target collection with \"\n \"metadata field values matching the input metadata field value will be deleted \"\n \"before new data is loaded.\",\n advanced=True,\n ),\n BoolInput(\n name=\"ignore_invalid_documents\",\n display_name=\"Ignore Invalid Documents\",\n info=\"Boolean flag to determine whether to ignore invalid documents at runtime.\",\n advanced=True,\n ),\n NestedDictInput(\n name=\"astradb_vectorstore_kwargs\",\n display_name=\"AstraDBVectorStore Parameters\",\n info=\"Optional dictionary of additional parameters for the AstraDBVectorStore.\",\n advanced=True,\n ),\n ]\n\n @classmethod\n def map_cloud_providers(cls):\n # TODO: Programmatically fetch the regions for each cloud provider\n return {\n \"Amazon Web Services\": {\n \"id\": \"aws\",\n \"regions\": [\"us-east-2\", \"ap-south-1\", \"eu-west-1\"],\n },\n \"Google Cloud Platform\": {\n \"id\": \"gcp\",\n \"regions\": [\"us-east1\"],\n },\n \"Microsoft Azure\": {\n \"id\": \"azure\",\n \"regions\": [\"westus3\"],\n },\n }\n\n @classmethod\n def get_vectorize_providers(cls, token: str, environment: str | None = None, api_endpoint: str | None = None):\n try:\n # Get the admin object\n admin = AstraDBAdmin(token=token, environment=environment)\n db_admin = admin.get_database_admin(api_endpoint=api_endpoint)\n\n # Get the list of embedding providers\n embedding_providers = db_admin.find_embedding_providers().as_dict()\n\n vectorize_providers_mapping = {}\n # Map the provider display name to the provider key and models\n for provider_key, provider_data in embedding_providers[\"embeddingProviders\"].items():\n # Get the provider display name and models\n display_name = provider_data[\"displayName\"]\n models = [model[\"name\"] for model in provider_data[\"models\"]]\n\n # Build our mapping\n vectorize_providers_mapping[display_name] = [provider_key, models]\n\n # Sort the resulting dictionary\n return defaultdict(list, dict(sorted(vectorize_providers_mapping.items())))\n except Exception as e:\n msg = f\"Error fetching vectorize providers: {e}\"\n raise ValueError(msg) from e\n\n 
@classmethod\n async def create_database_api(\n cls,\n new_database_name: str,\n cloud_provider: str,\n region: str,\n token: str,\n environment: str | None = None,\n keyspace: str | None = None,\n ):\n client = DataAPIClient(token=token, environment=environment)\n\n # Get the admin object\n admin_client = client.get_admin(token=token)\n\n # Call the create database function\n return await admin_client.async_create_database(\n name=new_database_name,\n cloud_provider=cls.map_cloud_providers()[cloud_provider][\"id\"],\n region=region,\n keyspace=keyspace,\n wait_until_active=False,\n )\n\n @classmethod\n async def create_collection_api(\n cls,\n new_collection_name: str,\n token: str,\n api_endpoint: str,\n environment: str | None = None,\n keyspace: str | None = None,\n dimension: int | None = None,\n embedding_generation_provider: str | None = None,\n embedding_generation_model: str | None = None,\n ):\n # Create the data API client\n client = DataAPIClient(token=token, environment=environment)\n\n # Get the database object\n database = client.get_async_database(api_endpoint=api_endpoint, token=token)\n\n # Build vectorize options, if needed\n vectorize_options = None\n if not dimension:\n vectorize_options = CollectionVectorServiceOptions(\n provider=cls.get_vectorize_providers(\n token=token, environment=environment, api_endpoint=api_endpoint\n ).get(embedding_generation_provider, [None, []])[0],\n model_name=embedding_generation_model,\n )\n\n # Create the collection\n return await database.create_collection(\n name=new_collection_name,\n keyspace=keyspace,\n dimension=dimension,\n service=vectorize_options,\n )\n\n @classmethod\n def get_database_list_static(cls, token: str, environment: str | None = None):\n client = DataAPIClient(token=token, environment=environment)\n\n # Get the admin object\n admin_client = client.get_admin(token=token)\n\n # Get the list of databases\n db_list = list(admin_client.list_databases())\n\n # Set the environment properly\n env_string = \"\"\n if environment and environment != \"prod\":\n env_string = f\"-{environment}\"\n\n # Generate the api endpoint for each database\n db_info_dict = {}\n for db in db_list:\n try:\n # Get the API endpoint for the database\n api_endpoint = f\"https://{db.info.id}-{db.info.region}.apps.astra{env_string}.datastax.com\"\n\n # Get the number of collections\n try:\n num_collections = len(\n list(\n client.get_database(\n api_endpoint=api_endpoint, token=token, keyspace=db.info.keyspace\n ).list_collection_names(keyspace=db.info.keyspace)\n )\n )\n except Exception: # noqa: BLE001\n num_collections = 0\n if db.status != \"PENDING\":\n continue\n\n # Add the database to the dictionary\n db_info_dict[db.info.name] = {\n \"api_endpoint\": api_endpoint,\n \"collections\": num_collections,\n \"status\": db.status if db.status != \"ACTIVE\" else None,\n }\n except Exception: # noqa: BLE001, S110\n pass\n\n return db_info_dict\n\n def get_database_list(self):\n return self.get_database_list_static(token=self.token, environment=self.environment)\n\n @classmethod\n def get_api_endpoint_static(\n cls,\n token: str,\n environment: str | None = None,\n api_endpoint: str | None = None,\n database_name: str | None = None,\n ):\n # If the api_endpoint is set, return it\n if api_endpoint:\n return api_endpoint\n\n # Check if the database_name is like a url\n if database_name and database_name.startswith(\"https://\"):\n return database_name\n\n # If the database is not set, nothing we can do.\n if not database_name:\n return None\n\n # 
Grab the database object\n db = cls.get_database_list_static(token=token, environment=environment).get(database_name)\n if not db:\n return None\n\n # Otherwise, get the URL from the database list\n return db.get(\"api_endpoint\")\n\n def get_api_endpoint(self):\n return self.get_api_endpoint_static(\n token=self.token,\n environment=self.environment,\n api_endpoint=self.api_endpoint,\n database_name=self.database_name,\n )\n\n def get_keyspace(self):\n keyspace = self.keyspace\n\n if keyspace:\n return keyspace.strip()\n\n return None\n\n def get_database_object(self, api_endpoint: str | None = None):\n try:\n client = DataAPIClient(token=self.token, environment=self.environment)\n\n return client.get_database(\n api_endpoint=api_endpoint or self.get_api_endpoint(),\n token=self.token,\n keyspace=self.get_keyspace(),\n )\n except Exception as e:\n msg = f\"Error fetching database object: {e}\"\n raise ValueError(msg) from e\n\n def collection_data(self, collection_name: str, database: Database | None = None):\n try:\n if not database:\n client = DataAPIClient(token=self.token, environment=self.environment)\n\n database = client.get_database(\n api_endpoint=self.get_api_endpoint(),\n token=self.token,\n keyspace=self.get_keyspace(),\n )\n\n collection = database.get_collection(collection_name, keyspace=self.get_keyspace())\n\n return collection.estimated_document_count()\n except Exception as e: # noqa: BLE001\n self.log(f\"Error checking collection data: {e}\")\n\n return None\n\n def _initialize_database_options(self):\n try:\n return [\n {\n \"name\": name,\n \"status\": info[\"status\"],\n \"collections\": info[\"collections\"],\n \"api_endpoint\": info[\"api_endpoint\"],\n \"icon\": \"data\",\n }\n for name, info in self.get_database_list().items()\n ]\n except Exception as e:\n msg = f\"Error fetching database options: {e}\"\n raise ValueError(msg) from e\n\n @classmethod\n def get_provider_icon(cls, collection: CollectionDescriptor | None = None, provider_name: str | None = None) -> str:\n # Get the provider name from the collection\n provider_name = provider_name or (\n collection.options.vector.service.provider\n if collection and collection.options and collection.options.vector and collection.options.vector.service\n else None\n )\n\n # If there is no provider, use the vector store icon\n if not provider_name or provider_name == \"bring your own\":\n return \"vectorstores\"\n\n # Special case for certain models\n # TODO: Add more icons\n if provider_name == \"nvidia\":\n return \"NVIDIA\"\n if provider_name == \"openai\":\n return \"OpenAI\"\n\n # Title case on the provider for the icon if no special case\n return provider_name.title()\n\n def _initialize_collection_options(self, api_endpoint: str | None = None):\n # Nothing to generate if we don't have an API endpoint yet\n api_endpoint = api_endpoint or self.get_api_endpoint()\n if not api_endpoint:\n return []\n\n # Retrieve the database object\n database = self.get_database_object(api_endpoint=api_endpoint)\n\n # Get the list of collections\n collection_list = list(database.list_collections(keyspace=self.get_keyspace()))\n\n # Return the list of collections and metadata associated\n return [\n {\n \"name\": col.name,\n \"records\": self.collection_data(collection_name=col.name, database=database),\n \"provider\": (\n col.options.vector.service.provider if col.options.vector and col.options.vector.service else None\n ),\n \"icon\": self.get_provider_icon(collection=col),\n \"model\": (\n col.options.vector.service.model_name 
if col.options.vector and col.options.vector.service else None\n ),\n }\n for col in collection_list\n ]\n\n def reset_provider_options(self, build_config: dict):\n # Get the list of vectorize providers\n vectorize_providers = self.get_vectorize_providers(\n token=self.token,\n environment=self.environment,\n api_endpoint=build_config[\"api_endpoint\"][\"value\"],\n )\n\n # Append a special case for Bring your own\n vectorize_providers[\"Bring your own\"] = [None, [\"Bring your own\"]]\n\n # If the collection is set, allow user to see embedding options\n build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_provider\"\n ][\"options\"] = [\"Bring your own\", \"Nvidia\", *[key for key in vectorize_providers if key != \"Nvidia\"]]\n\n # For all not Bring your own or Nvidia providers, add metadata saying configure in Astra DB Portal\n provider_options = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_provider\"\n ][\"options\"]\n\n # Go over each possible provider and add metadata to configure in Astra DB Portal\n for provider in provider_options:\n # Skip Bring your own and Nvidia, automatically configured\n if provider in [\"Bring your own\", \"Nvidia\"]:\n build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_provider\"\n ][\"options_metadata\"].append({\"icon\": self.get_provider_icon(provider_name=provider.lower())})\n continue\n\n # Add metadata to configure in Astra DB Portal\n build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_provider\"\n ][\"options_metadata\"].append({\" \": \"Configure in Astra DB Portal\"})\n\n # And allow the user to see the models based on a selected provider\n embedding_provider = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_provider\"\n ][\"value\"]\n\n # Set the options for the embedding model based on the provider\n build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_model\"\n ][\"options\"] = vectorize_providers.get(embedding_provider, [[], []])[1]\n\n return build_config\n\n def reset_collection_list(self, build_config: dict):\n # Get the list of options we have based on the token provided\n collection_options = self._initialize_collection_options(api_endpoint=build_config[\"api_endpoint\"][\"value\"])\n\n # If we retrieved options based on the token, show the dropdown\n build_config[\"collection_name\"][\"options\"] = [col[\"name\"] for col in collection_options]\n build_config[\"collection_name\"][\"options_metadata\"] = [\n {k: v for k, v in col.items() if k not in [\"name\"]} for col in collection_options\n ]\n\n # Reset the selected collection\n if build_config[\"collection_name\"][\"value\"] not in build_config[\"collection_name\"][\"options\"]:\n build_config[\"collection_name\"][\"value\"] = \"\"\n\n # If we have a database, collection name should not be advanced\n build_config[\"collection_name\"][\"advanced\"] = not build_config[\"database_name\"][\"value\"]\n\n return build_config\n\n def reset_database_list(self, build_config: dict):\n # Get the list of options we have based on the token provided\n database_options = self._initialize_database_options()\n\n # If we retrieved options based on the token, show 
the dropdown\n build_config[\"database_name\"][\"options\"] = [db[\"name\"] for db in database_options]\n build_config[\"database_name\"][\"options_metadata\"] = [\n {k: v for k, v in db.items() if k not in [\"name\"]} for db in database_options\n ]\n\n # Reset the selected database\n if build_config[\"database_name\"][\"value\"] not in build_config[\"database_name\"][\"options\"]:\n build_config[\"database_name\"][\"value\"] = \"\"\n build_config[\"api_endpoint\"][\"value\"] = \"\"\n build_config[\"collection_name\"][\"advanced\"] = True\n\n # If we have a token, database name should not be advanced\n build_config[\"database_name\"][\"advanced\"] = not build_config[\"token\"][\"value\"]\n\n return build_config\n\n def reset_build_config(self, build_config: dict):\n # Reset the list of databases we have based on the token provided\n build_config[\"database_name\"][\"options\"] = []\n build_config[\"database_name\"][\"options_metadata\"] = []\n build_config[\"database_name\"][\"value\"] = \"\"\n build_config[\"database_name\"][\"advanced\"] = True\n build_config[\"api_endpoint\"][\"value\"] = \"\"\n\n # Reset the list of collections and metadata associated\n build_config[\"collection_name\"][\"options\"] = []\n build_config[\"collection_name\"][\"options_metadata\"] = []\n build_config[\"collection_name\"][\"value\"] = \"\"\n build_config[\"collection_name\"][\"advanced\"] = True\n\n return build_config\n\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n # Callback for database creation\n if field_name == \"database_name\" and isinstance(field_value, dict) and \"new_database_name\" in field_value:\n try:\n await self.create_database_api(\n new_database_name=field_value[\"new_database_name\"],\n token=self.token,\n keyspace=self.get_keyspace(),\n environment=self.environment,\n cloud_provider=field_value[\"cloud_provider\"],\n region=field_value[\"region\"],\n )\n except Exception as e:\n msg = f\"Error creating database: {e}\"\n raise ValueError(msg) from e\n\n # Add the new database to the list of options\n build_config[\"database_name\"][\"options\"] = build_config[\"database_name\"][\"options\"] + [\n field_value[\"new_database_name\"]\n ]\n build_config[\"database_name\"][\"options_metadata\"] = build_config[\"database_name\"][\"options_metadata\"] + [\n {\"status\": \"PENDING\"}\n ]\n\n return self.reset_collection_list(build_config)\n\n # This is the callback required to update the list of regions for a cloud provider\n if field_name == \"database_name\" and isinstance(field_value, dict) and \"new_database_name\" not in field_value:\n cloud_provider = field_value[\"cloud_provider\"]\n build_config[\"database_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\"region\"][\n \"options\"\n ] = self.map_cloud_providers()[cloud_provider][\"regions\"]\n\n return build_config\n\n # Callback for the creation of collections\n if field_name == \"collection_name\" and isinstance(field_value, dict) and \"new_collection_name\" in field_value:\n try:\n # Get the dimension if its a BYO provider\n dimension = (\n field_value[\"dimension\"]\n if field_value[\"embedding_generation_provider\"] == \"Bring your own\"\n else None\n )\n\n # Create the collection\n await self.create_collection_api(\n new_collection_name=field_value[\"new_collection_name\"],\n token=self.token,\n api_endpoint=build_config[\"api_endpoint\"][\"value\"],\n environment=self.environment,\n keyspace=self.get_keyspace(),\n dimension=dimension,\n 
embedding_generation_provider=field_value[\"embedding_generation_provider\"],\n embedding_generation_model=field_value[\"embedding_generation_model\"],\n )\n except Exception as e:\n msg = f\"Error creating collection: {e}\"\n raise ValueError(msg) from e\n\n # Add the new collection to the list of options\n build_config[\"collection_name\"][\"value\"] = field_value[\"new_collection_name\"]\n build_config[\"collection_name\"][\"options\"].append(field_value[\"new_collection_name\"])\n\n # Get the provider and model for the new collection\n generation_provider = field_value[\"embedding_generation_provider\"]\n provider = generation_provider if generation_provider != \"Bring your own\" else None\n generation_model = field_value[\"embedding_generation_model\"]\n model = generation_model if generation_model and generation_model != \"Bring your own\" else None\n\n # Set the embedding choice\n build_config[\"embedding_choice\"][\"value\"] = \"Astra Vectorize\" if provider else \"Embedding Model\"\n build_config[\"embedding_model\"][\"advanced\"] = bool(provider)\n\n # Add the new collection to the list of options\n icon = \"NVIDIA\" if provider == \"Nvidia\" else \"vectorstores\"\n build_config[\"collection_name\"][\"options_metadata\"] = build_config[\"collection_name\"][\n \"options_metadata\"\n ] + [{\"records\": 0, \"provider\": provider, \"icon\": icon, \"model\": model}]\n\n return build_config\n\n # Callback to update the model list based on the embedding provider\n if (\n field_name == \"collection_name\"\n and isinstance(field_value, dict)\n and \"new_collection_name\" not in field_value\n ):\n return self.reset_provider_options(build_config)\n\n # When the component first executes, this is the update refresh call\n first_run = field_name == \"collection_name\" and not field_value and not build_config[\"database_name\"][\"options\"]\n\n # If the token has not been provided, simply return the empty build config\n if not self.token:\n return self.reset_build_config(build_config)\n\n # If this is the first execution of the component, reset and build database list\n if first_run or field_name in [\"token\", \"environment\"]:\n return self.reset_database_list(build_config)\n\n # Refresh the collection name options\n if field_name == \"database_name\" and not isinstance(field_value, dict):\n # If missing, refresh the database options\n if field_value not in build_config[\"database_name\"][\"options\"]:\n build_config = await self.update_build_config(build_config, field_value=self.token, field_name=\"token\")\n build_config[\"database_name\"][\"value\"] = \"\"\n else:\n # Find the position of the selected database to align with metadata\n index_of_name = build_config[\"database_name\"][\"options\"].index(field_value)\n\n # Initializing database condition\n pending = build_config[\"database_name\"][\"options_metadata\"][index_of_name][\"status\"] == \"PENDING\"\n if pending:\n return self.update_build_config(build_config, field_value=self.token, field_name=\"token\")\n\n # Set the API endpoint based on the selected database\n build_config[\"api_endpoint\"][\"value\"] = build_config[\"database_name\"][\"options_metadata\"][\n index_of_name\n ][\"api_endpoint\"]\n\n # Reset the provider options\n build_config = self.reset_provider_options(build_config)\n\n # Reset the list of collections we have based on the token provided\n return self.reset_collection_list(build_config)\n\n # Hide embedding model option if opriona_metadata provider is not null\n if field_name == \"collection_name\" and not 
isinstance(field_value, dict):\n # Assume we will be autodetecting the collection:\n build_config[\"autodetect_collection\"][\"value\"] = True\n\n # Reload the collection list\n build_config = self.reset_collection_list(build_config)\n\n # Set the options for collection name to be the field value if its a new collection\n if field_value and field_value not in build_config[\"collection_name\"][\"options\"]:\n # Add the new collection to the list of options\n build_config[\"collection_name\"][\"options\"].append(field_value)\n build_config[\"collection_name\"][\"options_metadata\"].append(\n {\"records\": 0, \"provider\": None, \"icon\": \"\", \"model\": None}\n )\n\n # Ensure that autodetect collection is set to False, since its a new collection\n build_config[\"autodetect_collection\"][\"value\"] = False\n\n # If nothing is selected, can't detect provider - return\n if not field_value:\n return build_config\n\n # Find the position of the selected collection to align with metadata\n index_of_name = build_config[\"collection_name\"][\"options\"].index(field_value)\n value_of_provider = build_config[\"collection_name\"][\"options_metadata\"][index_of_name][\"provider\"]\n\n # If we were able to determine the Vectorize provider, set it accordingly\n if value_of_provider:\n build_config[\"embedding_model\"][\"advanced\"] = True\n build_config[\"embedding_choice\"][\"value\"] = \"Astra Vectorize\"\n else:\n build_config[\"embedding_model\"][\"advanced\"] = False\n build_config[\"embedding_choice\"][\"value\"] = \"Embedding Model\"\n\n return build_config\n\n return build_config\n\n @check_cached_vector_store\n def build_vector_store(self):\n try:\n from langchain_astradb import AstraDBVectorStore\n except ImportError as e:\n msg = (\n \"Could not import langchain Astra DB integration package. 
\"\n \"Please install it with `pip install langchain-astradb`.\"\n )\n raise ImportError(msg) from e\n\n # Get the embedding model and additional params\n embedding_params = (\n {\"embedding\": self.embedding_model}\n if self.embedding_model and self.embedding_choice == \"Embedding Model\"\n else {}\n )\n\n # Get the additional parameters\n additional_params = self.astradb_vectorstore_kwargs or {}\n\n # Get Langflow version and platform information\n __version__ = get_version_info()[\"version\"]\n langflow_prefix = \"\"\n # if os.getenv(\"AWS_EXECUTION_ENV\") == \"AWS_ECS_FARGATE\": # TODO: More precise way of detecting\n # langflow_prefix = \"ds-\"\n\n # Get the database object\n database = self.get_database_object()\n autodetect = self.collection_name in database.list_collection_names() and self.autodetect_collection\n\n # Bundle up the auto-detect parameters\n autodetect_params = {\n \"autodetect_collection\": autodetect,\n \"content_field\": (\n self.content_field\n if self.content_field and embedding_params\n else (\n \"page_content\"\n if embedding_params\n and self.collection_data(collection_name=self.collection_name, database=database) == 0\n else None\n )\n ),\n \"ignore_invalid_documents\": self.ignore_invalid_documents,\n }\n\n # Attempt to build the Vector Store object\n try:\n vector_store = AstraDBVectorStore(\n # Astra DB Authentication Parameters\n token=self.token,\n api_endpoint=database.api_endpoint,\n namespace=database.keyspace,\n collection_name=self.collection_name,\n environment=self.environment,\n # Astra DB Usage Tracking Parameters\n ext_callers=[(f\"{langflow_prefix}langflow\", __version__)],\n # Astra DB Vector Store Parameters\n **autodetect_params,\n **embedding_params,\n **additional_params,\n )\n except Exception as e:\n msg = f\"Error initializing AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n\n # Add documents to the vector store\n self._add_documents_to_vector_store(vector_store)\n\n return vector_store\n\n def _add_documents_to_vector_store(self, vector_store) -> None:\n documents = []\n for _input in self.ingest_data or []:\n if isinstance(_input, Data):\n documents.append(_input.to_lc_document())\n else:\n msg = \"Vector Store Inputs must be Data objects.\"\n raise TypeError(msg)\n\n if documents and self.deletion_field:\n self.log(f\"Deleting documents where {self.deletion_field}\")\n try:\n database = self.get_database_object()\n collection = database.get_collection(self.collection_name, keyspace=database.keyspace)\n delete_values = list({doc.metadata[self.deletion_field] for doc in documents})\n self.log(f\"Deleting documents where {self.deletion_field} matches {delete_values}.\")\n collection.delete_many({f\"metadata.{self.deletion_field}\": {\"$in\": delete_values}})\n except Exception as e:\n msg = f\"Error deleting documents from AstraDBVectorStore based on '{self.deletion_field}': {e}\"\n raise ValueError(msg) from e\n\n if documents:\n self.log(f\"Adding {len(documents)} documents to the Vector Store.\")\n try:\n vector_store.add_documents(documents)\n except Exception as e:\n msg = f\"Error adding documents to AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n else:\n self.log(\"No documents to add to the Vector Store.\")\n\n def _map_search_type(self) -> str:\n search_type_mapping = {\n \"Similarity with score threshold\": \"similarity_score_threshold\",\n \"MMR (Max Marginal Relevance)\": \"mmr\",\n }\n\n return search_type_mapping.get(self.search_type, \"similarity\")\n\n def _build_search_args(self):\n query = 
self.search_query if isinstance(self.search_query, str) and self.search_query.strip() else None\n\n if query:\n args = {\n \"query\": query,\n \"search_type\": self._map_search_type(),\n \"k\": self.number_of_results,\n \"score_threshold\": self.search_score_threshold,\n }\n elif self.advanced_search_filter:\n args = {\n \"n\": self.number_of_results,\n }\n else:\n return {}\n\n filter_arg = self.advanced_search_filter or {}\n if filter_arg:\n args[\"filter\"] = filter_arg\n\n return args\n\n def search_documents(self, vector_store=None) -> list[Data]:\n vector_store = vector_store or self.build_vector_store()\n\n self.log(f\"Search input: {self.search_query}\")\n self.log(f\"Search type: {self.search_type}\")\n self.log(f\"Number of results: {self.number_of_results}\")\n\n try:\n search_args = self._build_search_args()\n except Exception as e:\n msg = f\"Error in AstraDBVectorStore._build_search_args: {e}\"\n raise ValueError(msg) from e\n\n if not search_args:\n self.log(\"No search input or filters provided. Skipping search.\")\n return []\n\n docs = []\n search_method = \"search\" if \"query\" in search_args else \"metadata_search\"\n\n try:\n self.log(f\"Calling vector_store.{search_method} with args: {search_args}\")\n docs = getattr(vector_store, search_method)(**search_args)\n except Exception as e:\n msg = f\"Error performing {search_method} in AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n\n self.log(f\"Retrieved documents: {len(docs)}\")\n\n data = docs_to_data(docs)\n self.log(f\"Converted documents to data: {len(data)}\")\n self.status = data\n\n return data\n\n def get_retriever_kwargs(self):\n search_args = self._build_search_args()\n\n return {\n \"search_type\": self._map_search_type(),\n \"search_kwargs\": search_args,\n }\n" + }, + "collection_name": { + "_input_type": "DropdownInput", + "advanced": true, + "combobox": true, + "dialog_inputs": { + "fields": { + "data": { + "node": { + "description": "", + "display_name": "Create new collection", + "field_order": [ + "new_collection_name", + "embedding_generation_provider", + "embedding_generation_model" + ], + "name": "create_collection", + "template": { + "dimension": { + "_input_type": "IntInput", + "advanced": false, + "display_name": "Dimensions", + "dynamic": false, + "info": "Dimension of the embeddings to generate.", + "list": false, + "list_add_label": "Add More", + "name": "dimension", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "int", + "value": 1024 + }, + "embedding_generation_model": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Embedding model", + "dynamic": false, + "info": "Model to use for generating embeddings.", + "name": "embedding_generation_model", + "options": [ + "Bring your own", + "NV-Embed-QA" + ], + "options_metadata": [], + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "str", + "value": "" + }, + "embedding_generation_provider": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Embedding generation method", + "dynamic": false, + "info": "Provider to use for generating embeddings.", + "name": "embedding_generation_provider", + "options": [ + "Bring your own", + "Nvidia" + ], + "options_metadata": [], + "placeholder": "", + "real_time_refresh": true, + 
"required": true, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "str", + "value": "" + }, + "new_collection_name": { + "_input_type": "StrInput", + "advanced": false, + "display_name": "Name", + "dynamic": false, + "info": "Name of the new collection to create in Astra DB.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "new_collection_name", + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "str", + "value": "" + } + } + } + } + }, + "functionality": "create" + }, + "display_name": "Collection", + "dynamic": false, + "info": "The name of the collection within Astra DB where the vectors will be stored.", + "name": "collection_name", + "options": [], + "options_metadata": [], + "placeholder": "", + "real_time_refresh": true, + "refresh_button": true, + "required": true, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "str", + "value": "" + }, + "content_field": { + "_input_type": "StrInput", + "advanced": true, + "display_name": "Content Field", + "dynamic": false, + "info": "Field to use as the text content field for the vector store.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "content_field", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "str", + "value": "" + }, + "database_name": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": true, + "dialog_inputs": { + "fields": { + "data": { + "node": { + "description": "", + "display_name": "Create new database", + "field_order": [ + "new_database_name", + "cloud_provider", + "region" + ], + "name": "create_database", + "template": { + "cloud_provider": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Cloud provider", + "dynamic": false, + "info": "Cloud provider for the new database.", + "name": "cloud_provider", + "options": [ + "Amazon Web Services", + "Google Cloud Platform", + "Microsoft Azure" + ], + "options_metadata": [], + "placeholder": "", + "real_time_refresh": true, + "required": true, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "str", + "value": "" + }, + "new_database_name": { + "_input_type": "StrInput", + "advanced": false, + "display_name": "Name", + "dynamic": false, + "info": "Name of the new database to create in Astra DB.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "new_database_name", + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "str", + "value": "" + }, + "region": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Region", + "dynamic": false, + "info": "Region for the new database.", + "name": "region", + "options": [ + "us-east-2", + "ap-south-1", + "eu-west-1" + ], + "options_metadata": [], + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "str", + "value": "" + } + } + } + } + }, + "functionality": "create" + }, + "display_name": "Database", + "dynamic": false, + "info": "The Database name for the Astra DB 
instance.", + "name": "database_name", + "options": [], + "options_metadata": [], + "placeholder": "", + "real_time_refresh": true, + "refresh_button": true, + "required": true, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "str", + "value": "" + }, + "deletion_field": { + "_input_type": "StrInput", + "advanced": true, + "display_name": "Deletion Based On Field", + "dynamic": false, + "info": "When this parameter is provided, documents in the target collection with metadata field values matching the input metadata field value will be deleted before new data is loaded.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "deletion_field", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "str", + "value": "" + }, + "embedding_choice": { + "_input_type": "DropdownInput", + "advanced": true, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Embedding Model or Astra Vectorize", + "dynamic": false, + "info": "Choose an embedding model or use Astra Vectorize.", + "name": "embedding_choice", + "options": [ + "Embedding Model", + "Astra Vectorize" + ], + "options_metadata": [], + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "str", + "value": "Embedding Model" + }, + "embedding_model": { + "_input_type": "HandleInput", + "advanced": false, + "display_name": "Embedding Model", + "dynamic": false, + "info": "Specify the Embedding Model. Not required for Astra Vectorize collections.", + "input_types": [ + "Embeddings" + ], + "list": false, + "list_add_label": "Add More", + "name": "embedding_model", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "trace_as_metadata": true, + "type": "other", + "value": "" + }, + "environment": { + "_input_type": "StrInput", + "advanced": true, + "display_name": "Environment", + "dynamic": false, + "info": "The environment for the Astra DB API Endpoint.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "environment", + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "str", + "value": "" + }, + "ignore_invalid_documents": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Ignore Invalid Documents", + "dynamic": false, + "info": "Boolean flag to determine whether to ignore invalid documents at runtime.", + "list": false, + "list_add_label": "Add More", + "name": "ignore_invalid_documents", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "bool", + "value": false + }, + "ingest_data": { + "_input_type": "DataInput", + "advanced": false, + "display_name": "Ingest Data", + "dynamic": false, + "info": "", + "input_types": [ + "Data" + ], + "list": false, + "list_add_label": "Add More", + "name": "ingest_data", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "type": "other", + "value": "" + }, + "keyspace": { + "_input_type": "StrInput", + "advanced": true, + "display_name": "Keyspace", + "dynamic": false, + "info": "Optional keyspace within Astra DB to use 
for the collection.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "keyspace", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "str", + "value": "" + }, + "number_of_results": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Number of Search Results", + "dynamic": false, + "info": "Number of search results to return.", + "list": false, + "list_add_label": "Add More", + "name": "number_of_results", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "int", + "value": 4 + }, + "search_query": { + "_input_type": "MultilineInput", + "advanced": false, + "display_name": "Search Query", + "dynamic": false, + "info": "", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "multiline": true, + "name": "search_query", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": true, + "trace_as_input": true, + "trace_as_metadata": true, + "type": "str", + "value": "" + }, + "search_score_threshold": { + "_input_type": "FloatInput", + "advanced": true, + "display_name": "Search Score Threshold", + "dynamic": false, + "info": "Minimum similarity score threshold for search results. (when using 'Similarity with score threshold')", + "list": false, + "list_add_label": "Add More", + "name": "search_score_threshold", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "float", + "value": 0 + }, + "search_type": { + "_input_type": "DropdownInput", + "advanced": true, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Search Type", + "dynamic": false, + "info": "Search type to use", + "name": "search_type", + "options": [ + "Similarity", + "Similarity with score threshold", + "MMR (Max Marginal Relevance)" + ], + "options_metadata": [], + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "str", + "value": "Similarity" + }, + "token": { + "_input_type": "SecretStrInput", + "advanced": false, + "display_name": "Astra DB Application Token", + "dynamic": false, + "info": "Authentication token for accessing Astra DB.", + "input_types": [], + "load_from_db": false, + "name": "token", + "password": true, + "placeholder": "", + "real_time_refresh": true, + "required": true, + "show": true, + "title_case": false, + "type": "str", + "value": "" + } + }, + "tool_mode": false + }, + "showNode": true, + "type": "AstraDB" + }, + "dragging": false, + "id": "AstraDB-fyg6q", + "measured": { + "height": 449, + "width": 320 + }, + "position": { + "x": 2065.4581687557493, + "y": 1496.259507100966 }, "selected": false, "type": "genericNode" } ], "viewport": { - "x": 27.16134293669745, - "y": -178.0488836602833, - "zoom": 0.48566409515519327 + "x": -348.2904435861742, + "y": -373.26831165592205, + "zoom": 0.6306258994328376 } }, "description": "Load your data for chat context with Retrieval Augmented Generation.", "endpoint_name": null, - "id": "217f5b21-a26e-485c-8e22-420cddc47678", + "id": "0ea0fb07-2bcc-4c51-ad28-8ef9239843af", "is_component": false, "last_tested_version": "1.1.5", "name": "Vector Store RAG", diff --git a/src/backend/base/langflow/schema/dataframe.py 
b/src/backend/base/langflow/schema/dataframe.py index 757e59355..193036dc2 100644 --- a/src/backend/base/langflow/schema/dataframe.py +++ b/src/backend/base/langflow/schema/dataframe.py @@ -1,6 +1,7 @@ from typing import cast import pandas as pd +from langchain_core.documents import Document from pandas import DataFrame as pandas_DataFrame from langflow.schema.data import Data @@ -32,9 +33,21 @@ class DataFrame(pandas_DataFrame): >>> dataset = DataFrame({"name": ["John", "Jane"], "age": [30, 25]}) """ - def __init__(self, data: list[dict] | list[Data] | pd.DataFrame | None = None, **kwargs): + def __init__( + self, + data: list[dict] | list[Data] | pd.DataFrame | None = None, + text_key: str = "text", + default_value: str = "", + **kwargs, + ): + # Initialize pandas DataFrame first without data + super().__init__(**kwargs) # Removed data parameter + + # Store attributes as private members to avoid conflicts with pandas + self._text_key = text_key + self._default_value = default_value + if data is None: - super().__init__(**kwargs) return if isinstance(data, list): @@ -43,15 +56,36 @@ class DataFrame(pandas_DataFrame): elif not all(isinstance(x, dict) for x in data): msg = "List items must be either all Data objects or all dictionaries" raise ValueError(msg) - kwargs["data"] = data - elif isinstance(data, dict | pd.DataFrame): - kwargs["data"] = data + self._update(data, **kwargs) + elif isinstance(data, dict | pd.DataFrame): # Fixed type check syntax + self._update(data, **kwargs) - super().__init__(**kwargs) + def _update(self, data, **kwargs): + """Helper method to update DataFrame with new data.""" + new_df = pd.DataFrame(data, **kwargs) + self._update_inplace(new_df) + + # Update property accessors + @property + def text_key(self) -> str: + return self._text_key + + @text_key.setter + def text_key(self, value: str) -> None: + self._text_key = value + + @property + def default_value(self) -> str: + return self._default_value + + @default_value.setter + def default_value(self, value: str) -> None: + self._default_value = value def to_data_list(self) -> list[Data]: """Converts the DataFrame back to a list of Data objects.""" list_of_dicts = self.to_dict(orient="records") + # suggested change: [Data(**row) for row in list_of_dicts] return [Data(data=row) for row in list_of_dicts] def add_row(self, data: dict | Data) -> "DataFrame": @@ -103,3 +137,31 @@ class DataFrame(pandas_DataFrame): Returns True if the DataFrame has at least one row, False otherwise. """ return not self.empty + + def to_lc_documents(self) -> list[Document]: + """Converts the DataFrame to a list of Documents. + + Returns: + list[Document]: The converted list of Documents. + """ + list_of_dicts = self.to_dict(orient="records") + documents = [] + for row in list_of_dicts: + data_copy = row.copy() + text = data_copy.pop(self._text_key, self._default_value) + if isinstance(text, str): + documents.append(Document(page_content=text, metadata=data_copy)) + else: + documents.append(Document(page_content=str(text), metadata=data_copy)) + return documents + + def _docs_to_dataframe(self, docs): + """Converts a list of Documents to a DataFrame. 
+ + Args: + docs: List of Document objects + + Returns: + DataFrame: A new DataFrame with the converted Documents + """ + return DataFrame(docs) diff --git a/src/backend/tests/unit/components/processing/test_split_text_component.py b/src/backend/tests/unit/components/processing/test_split_text_component.py index bd2b574ef..bd08a54e8 100644 --- a/src/backend/tests/unit/components/processing/test_split_text_component.py +++ b/src/backend/tests/unit/components/processing/test_split_text_component.py @@ -1,4 +1,5 @@ import pytest +from langflow.components.data import URLComponent from langflow.components.processing import SplitTextComponent from langflow.schema import Data, DataFrame @@ -44,6 +45,7 @@ class TestSplitTextComponent(ComponentTestBaseWithoutClient): "chunk_overlap": 0, "chunk_size": 15, "separator": "\n", + "text_key": "text", "session_id": "test_session", "sender": "test_sender", "sender_name": "test_sender_name", @@ -220,3 +222,53 @@ class TestSplitTextComponent(ComponentTestBaseWithoutClient): assert "Second line" in results[1].text, f"Expected 'Second line', got '{results[1].text}'" assert "Another text" in results[2].text, f"Expected 'Another text', got '{results[2].text}'" assert "Another line" in results[3].text, f"Expected 'Another line', got '{results[3].text}'" + + def test_split_text_with_dataframe_input(self): + """Test splitting text with DataFrame input.""" + component = SplitTextComponent() + test_texts = ["First text\nSecond line", "Another text\nAnother line"] + data_frame = DataFrame([Data(text=text) for text in test_texts]) + component.set_attributes( + { + "data_inputs": data_frame, + "chunk_overlap": 0, + "chunk_size": 10, + "separator": "\n", + "session_id": "test_session", + "sender": "test_sender", + "sender_name": "test_sender_name", + } + ) + + results = component.split_text() + assert len(results) == 4, f"Expected 4 chunks (2 from each text), got {len(results)}" + assert "First text" in results[0].text, f"Expected 'First text', got '{results[0].text}'" + assert "Second line" in results[1].text, f"Expected 'Second line', got '{results[1].text}'" + assert "Another text" in results[2].text, f"Expected 'Another text', got '{results[2].text}'" + assert "Another line" in results[3].text, f"Expected 'Another line', got '{results[3].text}'" + + def test_with_url_loader(self): + """Test splitting text with URL loader.""" + component = SplitTextComponent() + url = ["https://en.wikipedia.org/wiki/London", "https://en.wikipedia.org/wiki/Paris"] + data_frame = URLComponent(urls=url, format="Text").as_dataframe() + assert isinstance(data_frame, DataFrame), "Expected DataFrame instance" + assert len(data_frame) == 2, f"Expected DataFrame with 2 rows, got {len(data_frame)}" + component.set_attributes( + { + "data_inputs": data_frame, + "chunk_overlap": 0, + "chunk_size": 10, + "separator": "\n", + "session_id": "test_session", + "sender": "test_sender", + "sender_name": "test_sender_name", + } + ) + results = component.as_dataframe() + assert isinstance(results, DataFrame), "Expected DataFrame instance" + assert len(results) > 2, f"Expected DataFrame with more than 2 rows, got {len(results)}" + + results = component.split_text() + assert isinstance(results, list), "Expected list instance" + assert len(results) > 2, f"Expected DataFrame with more than 2 rows, got {len(results)}" diff --git a/src/backend/tests/unit/schema/test_schema_dataframe.py b/src/backend/tests/unit/schema/test_schema_dataframe.py new file mode 100644 index 000000000..7bd9d2ea7 --- /dev/null +++ 
b/src/backend/tests/unit/schema/test_schema_dataframe.py @@ -0,0 +1,66 @@ +import pandas as pd +import pytest +from langchain_core.documents import Document +from langflow.schema.data import Data +from langflow.schema.dataframe import DataFrame + + +@pytest.fixture +def sample_dataframe(): + """Create a sample DataFrame for testing.""" + return pd.DataFrame({"name": ["John", "Jane"], "text": ["name is John", "name is Jane"]}) + + +class TestDataFrameSchema: + def test_to_data_list(self, sample_dataframe): + """Test conversion of DataFrame to list of Data objects.""" + data_frame = DataFrame(sample_dataframe) + data_list = data_frame.to_data_list() + assert isinstance(data_list, list) + assert all(isinstance(item, Data) for item in data_list) + assert len(data_list) == len(sample_dataframe) + assert data_list[0].data["name"] == "John" + assert data_list[0].data["text"] == "name is John" + + def test_add_row(self, sample_dataframe): + """Test adding a single row to DataFrame.""" + data_frame = DataFrame(sample_dataframe) + # Test adding dict + new_df = data_frame.add_row({"name": "Bob", "text": "name is Bob"}) + assert len(new_df) == len(sample_dataframe) + 1 + assert new_df.iloc[-1]["name"] == "Bob" + assert new_df.iloc[-1]["text"] == "name is Bob" + + # Test adding Data object + data_obj = Data(data={"name": "Alice", "text": "name is Alice"}) + new_df = data_frame.add_row(data_obj) + assert len(new_df) == len(sample_dataframe) + 1 + assert new_df.iloc[-1]["name"] == "Alice" + assert new_df.iloc[-1]["text"] == "name is Alice" + + def test_add_rows(self, sample_dataframe): + """Test adding multiple rows to DataFrame.""" + data_frame = DataFrame(sample_dataframe) + new_rows = [{"name": "Bob", "text": "name is Bob"}, Data(data={"name": "Alice", "text": "name is Alice"})] + new_df = data_frame.add_rows(new_rows) + assert len(new_df) == len(sample_dataframe) + 2 + assert new_df.iloc[-2:]["name"].tolist() == ["Bob", "Alice"] + assert new_df.iloc[-2:]["text"].tolist() == ["name is Bob", "name is Alice"] + + def test_to_lc_documents(self, sample_dataframe): + """Test conversion to LangChain documents.""" + data_frame = DataFrame(sample_dataframe) + documents = data_frame.to_lc_documents() + assert isinstance(documents, list) + assert all(isinstance(doc, Document) for doc in documents) + assert len(documents) == 2 + assert documents[0].page_content == "name is John" + assert documents[0].metadata == {"name": "John"} + + def test_bool_operator(self): + """Test boolean operator behavior.""" + empty_df = DataFrame() + assert not bool(empty_df) + + non_empty_df = DataFrame({"name": ["John"], "text": ["name is John"]}) + assert bool(non_empty_df) diff --git a/src/frontend/package-lock.json b/src/frontend/package-lock.json index 765ff4fc8..ee0d38eb9 100644 --- a/src/frontend/package-lock.json +++ b/src/frontend/package-lock.json @@ -836,6 +836,7 @@ }, "node_modules/@clack/prompts/node_modules/is-unicode-supported": { "version": "1.3.0", + "extraneous": true, "inBundle": true, "license": "MIT", "engines": { diff --git a/src/frontend/tests/core/features/freeze.spec.ts b/src/frontend/tests/core/features/freeze.spec.ts index 13d38ea64..3435e26ab 100644 --- a/src/frontend/tests/core/features/freeze.spec.ts +++ b/src/frontend/tests/core/features/freeze.spec.ts @@ -119,7 +119,7 @@ test( await urlOutput.hover(); await page.mouse.down(); const splitTextInputData = await page.getByTestId( - "handle-splittext-shownode-data inputs-left", + "handle-splittext-shownode-input documents-left", ); await 
splitTextInputData.hover(); await page.mouse.up(); diff --git a/src/frontend/tests/core/features/logs.spec.ts b/src/frontend/tests/core/features/logs.spec.ts index a0a188365..b74272920 100644 --- a/src/frontend/tests/core/features/logs.spec.ts +++ b/src/frontend/tests/core/features/logs.spec.ts @@ -22,7 +22,7 @@ test( await page.getByTestId("side_nav_options_all-templates").click(); await page.getByRole("heading", { name: "Basic Prompting" }).click(); await expect(page.getByTestId(/.*rf__node.*/).first()).toBeVisible({ - timeout: 1000, + timeout: 3000, }); let outdatedComponents = await page .getByTestId("icon-AlertTriangle") diff --git a/src/frontend/tests/core/features/stop-building.spec.ts b/src/frontend/tests/core/features/stop-building.spec.ts index c9000dd36..9a73cc97b 100644 --- a/src/frontend/tests/core/features/stop-building.spec.ts +++ b/src/frontend/tests/core/features/stop-building.spec.ts @@ -82,7 +82,7 @@ test( await urlOutput.hover(); await page.mouse.down(); const splitTextInputData = await page.getByTestId( - "handle-splittext-shownode-data inputs-left", + "handle-splittext-shownode-input documents-left", ); await splitTextInputData.hover(); await page.mouse.up(); diff --git a/src/frontend/tests/extended/features/starter-projects.spec.ts b/src/frontend/tests/extended/features/starter-projects.spec.ts index 0730c99fe..ecf70cc0e 100644 --- a/src/frontend/tests/extended/features/starter-projects.spec.ts +++ b/src/frontend/tests/extended/features/starter-projects.spec.ts @@ -17,6 +17,8 @@ test( await page.getByTestId("search-input-template").fill("Document"); + await page.waitForTimeout(1000); + expect( page.getByTestId("template_basic-prompting-(hello,-world)"), ).toBeVisible({ visible: false, timeout: 3000 }); diff --git a/src/frontend/tests/extended/features/stop-button-playground.spec.ts b/src/frontend/tests/extended/features/stop-button-playground.spec.ts index 85bbc61b1..fcff9a0ac 100644 --- a/src/frontend/tests/extended/features/stop-button-playground.spec.ts +++ b/src/frontend/tests/extended/features/stop-button-playground.spec.ts @@ -23,9 +23,6 @@ test( await page.getByTestId("sidebar-custom-component-button").click(); await page.getByTitle("fit view").click(); - await page.getByTitle("zoom out").click(); - await page.getByTitle("zoom out").click(); - await page.getByTitle("zoom out").click(); await page.getByTestId("sidebar-search-input").click(); await page.getByTestId("sidebar-search-input").fill("chat output"); @@ -36,7 +33,9 @@ test( await page .getByTestId("outputsChat Output") - .dragTo(page.locator('//*[@id="react-flow-id"]')); + .dragTo(page.locator('//*[@id="react-flow-id"]'), { + targetPosition: { x: 400, y: 400 }, + }); await adjustScreenView(page);
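
For reference, the new DataFrame input path of SplitTextComponent can be exercised roughly as the added unit tests do. A minimal sketch, using only the component, attribute names, and schema classes introduced or touched by this change; the sample texts are illustrative only:

    from langflow.components.processing import SplitTextComponent
    from langflow.schema import Data, DataFrame

    # Rows whose "text" column holds the content to split; DataFrame accepts
    # a list of Data objects (or dicts) directly.
    frame = DataFrame([Data(text="First text\nSecond line"), Data(text="Another text\nAnother line")])

    component = SplitTextComponent()
    component.set_attributes(
        {
            "data_inputs": frame,  # a DataFrame is now accepted here, alongside Data
            "chunk_overlap": 0,
            "chunk_size": 10,
            "separator": "\n",
            "text_key": "text",  # column used as page_content when rows become Documents
        }
    )

    chunks = component.split_text()   # list[Data], one entry per chunk
    table = component.as_dataframe()  # the same chunks returned as a DataFrame

Rows may carry additional columns; DataFrame.to_lc_documents() keeps them as Document metadata while the text_key column becomes the page_content.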