diff --git a/tests/conftest.py b/tests/conftest.py
index ca0bb1dc0..328a168ad 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -122,116 +122,147 @@ def client_fixture(session: Session):
 #
 def custom_chain():
     return '''from __future__ import annotations

-    from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Optional

-    from pydantic import Extra
+from pydantic import Extra

-    from langchain.schema import BaseLanguageModel, Document
-    from langchain.callbacks.manager import (
-        AsyncCallbackManagerForChainRun,
-        CallbackManagerForChainRun,
-    )
-    from langchain.chains.base import Chain
-    from langchain.prompts import StringPromptTemplate
-    from langflow.interface.custom.base import CustomComponent
+from langchain.schema import BaseLanguageModel, Document
+from langchain.callbacks.manager import (
+    AsyncCallbackManagerForChainRun,
+    CallbackManagerForChainRun,
+)
+from langchain.chains.base import Chain
+from langchain.prompts import StringPromptTemplate
+from langflow.interface.custom.base import CustomComponent

-    class MyCustomChain(Chain):
+class MyCustomChain(Chain):
+    """
+    An example of a custom chain.
+    """
+
+    prompt: StringPromptTemplate
+    """Prompt object to use."""
+    llm: BaseLanguageModel
+    output_key: str = "text"  #: :meta private:
+
+    class Config:
+        """Configuration for this pydantic object."""
+
+        extra = Extra.forbid
+        arbitrary_types_allowed = True
+
+    @property
+    def input_keys(self) -> List[str]:
+        """Will be whatever keys the prompt expects.
+
+        :meta private:
         """
-        An example of a custom chain.
+        return self.prompt.input_variables
+
+    @property
+    def output_keys(self) -> List[str]:
+        """Will always return text key.
+
+        :meta private:
         """
+        return [self.output_key]

-        prompt: StringPromptTemplate
-        """Prompt object to use."""
-        llm: BaseLanguageModel
-        output_key: str = "text"  #: :meta private:
+    def _call(
+        self,
+        inputs: Dict[str, Any],
+        run_manager: Optional[CallbackManagerForChainRun] = None,
+    ) -> Dict[str, str]:
+        # Your custom chain logic goes here
+        # This is just an example that mimics LLMChain
+        prompt_value = self.prompt.format_prompt(**inputs)

-        class Config:
-            """Configuration for this pydantic object."""
+        # Whenever you call a language model, or another chain, you should pass
+        # a callback manager to it. This allows the inner run to be tracked by
+        # any callbacks that are registered on the outer run.
+        # You can always obtain a callback manager for this by calling
+        # `run_manager.get_child()` as shown below.
+        response = self.llm.generate_prompt(
+            [prompt_value],
+            callbacks=run_manager.get_child() if run_manager else None,
+        )

-            extra = Extra.forbid
-            arbitrary_types_allowed = True
+        # If you want to log something about this run, you can do so by calling
+        # methods on the `run_manager`, as shown below. This will trigger any
+        # callbacks that are registered for that event.
+        if run_manager:
+            run_manager.on_text("Log something about this run")

-        @property
-        def input_keys(self) -> List[str]:
-            """Will be whatever keys the prompt expects.
+        return {self.output_key: response.generations[0][0].text}

-            :meta private:
-            """
-            return self.prompt.input_variables
+    async def _acall(
+        self,
+        inputs: Dict[str, Any],
+        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
+    ) -> Dict[str, str]:
+        # Your custom chain logic goes here
+        # This is just an example that mimics LLMChain
+        prompt_value = self.prompt.format_prompt(**inputs)

-        @property
-        def output_keys(self) -> List[str]:
-            """Will always return text key.
+        # Whenever you call a language model, or another chain, you should pass
+        # a callback manager to it. This allows the inner run to be tracked by
+        # any callbacks that are registered on the outer run.
+        # You can always obtain a callback manager for this by calling
+        # `run_manager.get_child()` as shown below.
+        response = await self.llm.agenerate_prompt(
+            [prompt_value],
+            callbacks=run_manager.get_child() if run_manager else None,
+        )

-            :meta private:
-            """
-            return [self.output_key]
+        # If you want to log something about this run, you can do so by calling
+        # methods on the `run_manager`, as shown below. This will trigger any
+        # callbacks that are registered for that event.
+        if run_manager:
+            await run_manager.on_text("Log something about this run")

-        def _call(
-            self,
-            inputs: Dict[str, Any],
-            run_manager: Optional[CallbackManagerForChainRun] = None,
-        ) -> Dict[str, str]:
-            # Your custom chain logic goes here
-            # This is just an example that mimics LLMChain
-            prompt_value = self.prompt.format_prompt(**inputs)
+        return {self.output_key: response.generations[0][0].text}

-            # Whenever you call a language model, or another chain, you should pass
-            # a callback manager to it. This allows the inner run to be tracked by
-            # any callbacks that are registered on the outer run.
-            # You can always obtain a callback manager for this by calling
-            # `run_manager.get_child()` as shown below.
-            response = self.llm.generate_prompt(
-                [prompt_value],
-                callbacks=run_manager.get_child() if run_manager else None,
-            )
+    @property
+    def _chain_type(self) -> str:
+        return "my_custom_chain"

-            # If you want to log something about this run, you can do so by calling
-            # methods on the `run_manager`, as shown below. This will trigger any
-            # callbacks that are registered for that event.
-            if run_manager:
-                run_manager.on_text("Log something about this run")
+class CustomChain(CustomComponent):
+    display_name: str = "Custom Chain"
+    field_config = {
+        "prompt": {"field_type": "prompt"},
+        "llm": {"field_type": "BaseLanguageModel"},
+    }

-            return {self.output_key: response.generations[0][0].text}
+    def build(self, prompt, llm, input: str) -> Document:
+        chain = MyCustomChain(prompt=prompt, llm=llm)
+        return chain(input)'''

-        async def _acall(
-            self,
-            inputs: Dict[str, Any],
-            run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
-        ) -> Dict[str, str]:
-            # Your custom chain logic goes here
-            # This is just an example that mimics LLMChain
-            prompt_value = self.prompt.format_prompt(**inputs)

-            # Whenever you call a language model, or another chain, you should pass
-            # a callback manager to it. This allows the inner run to be tracked by
-            # any callbacks that are registered on the outer run.
-            # You can always obtain a callback manager for this by calling
-            # `run_manager.get_child()` as shown below.
-            response = await self.llm.agenerate_prompt(
-                [prompt_value],
-                callbacks=run_manager.get_child() if run_manager else None,
-            )
+@pytest.fixture
+def data_processing():
+    return """import pandas as pd
+from typing import List
+from langchain.schema import Document
+from langflow.interface.custom.base import CustomComponent

-            # If you want to log something about this run, you can do so by calling
-            # methods on the `run_manager`, as shown below. This will trigger any
-            # callbacks that are registered for that event.
-            if run_manager:
-                await run_manager.on_text("Log something about this run")
+class CSVLoaderComponent(CustomComponent):
+    display_name: str = "CSV Loader"
+    field_config = {
+        "filename": {"field_type": "str", "required": True},
+        "column_name": {"field_type": "str", "required": True},
+    }

-            return {self.output_key: response.generations[0][0].text}
+    def build(self, filename: str, column_name: str) -> List[Document]:
+        # Load the CSV file
+        df = pd.read_csv(filename)

-        @property
-        def _chain_type(self) -> str:
-            return "my_custom_chain"
+        # Verify the column exists
+        if column_name not in df.columns:
+            raise ValueError(f"Column '{column_name}' not found in the CSV file")

-        class CustomChain(CustomComponent):
-            display_name: str = "Custom Chain"
-            field_config = {
-                "prompt": {"field_type": "prompt"},
-                "llm": {"field_type": "BaseLanguageModel"},
-            }
+        # Convert each row of the specified column to a document object
+        documents = []
+        for content in df[column_name]:
+            metadata = {"filename": filename}
+            documents.append(Document(page_content=str(content), metadata=metadata))

-            def build(self, prompt, llm, input: str) -> Document:
-                chain = MyCustomChain(prompt=prompt, llm=llm)
-                return chain(input)'''
+        return documents"""
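Both fixtures return source strings rather than live objects, so a consuming test can validate them without instantiating an LLM or touching the filesystem. A minimal sketch of such a test, assuming only the standard library's ast module and that both functions are registered as pytest fixtures (the decorator for custom_chain sits above this hunk); the helper and test names here are illustrative, not part of this patch:

import ast


def _class_names(source: str) -> set:
    """Return the names of all classes defined in a source string."""
    tree = ast.parse(source)
    return {node.name for node in ast.walk(tree) if isinstance(node, ast.ClassDef)}


def test_custom_chain_fixture_parses(custom_chain):
    # The fixture code must be syntactically valid and define both classes.
    assert {"MyCustomChain", "CustomChain"} <= _class_names(custom_chain)


def test_data_processing_fixture_parses(data_processing):
    # Same structural check for the CSV loader component fixture.
    assert "CSVLoaderComponent" in _class_names(data_processing)

Parsing with ast rather than exec keeps the check independent of whether pandas, langchain, and langflow are importable in the test environment.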