🔧 refactor(constants.py, conftest.py): improve code structure and readability

🔨 refactor(constants.py): make YourComponent inherit from CustomComponent for better code reuse and structure

🔨 refactor(conftest.py): restructure custom_chain fixture for better readability and maintainability
This commit is contained in:
gustavoschaedler 2023-07-14 18:29:54 +01:00
commit cc2dbe1490
2 changed files with 106 additions and 107 deletions

View file

@@ -25,17 +25,19 @@ LANGCHAIN_BASE_TYPES = {
DEFAULT_CUSTOM_COMPONENT_CODE = """
from langflow import Prompt
from langflow.interface.custom.custom_component import CustomComponent
from langchain.llms.base import BaseLLM
from langchain.chains import LLMChain
from langflow.interface.custom import CustomComponent
from langchain import PromptTemplate
from langchain.schema import Document
import requests
class YourComponent:
display_name: str = "Your Component"
description: str = "Your description"
field_config = { "url": { "multiline": True, "required": True } }
class YourComponent(CustomComponent):
#display_name: str = "Your Component"
#description: str = "Your description"
#field_config = { "url": { "multiline": True, "required": True } }
def build(self, url: str, llm: BaseLLM, template: Prompt) -> Document:
response = requests.get(url)
@@ -44,7 +46,3 @@ class YourComponent:
result = chain.run(response.text[:300])
return Document(page_content=str(result))
"""
# Create a new class that can be used as a type
# that returns type "prompt" if we get a certain param

View file

@@ -120,118 +120,119 @@ def client_fixture(session: Session): #
@pytest.fixture
def custom_chain():
return '''from __future__ import annotations
return '''
from __future__ import annotations
from typing import Any, Dict, List, Optional
from typing import Any, Dict, List, Optional
from pydantic import Extra
from pydantic import Extra
from langchain.schema import BaseLanguageModel, Document
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain.chains.base import Chain
from langchain.prompts import StringPromptTemplate
from langflow.interface.custom.base import CustomComponent
from langchain.schema import BaseLanguageModel, Document
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain.chains.base import Chain
from langchain.prompts import StringPromptTemplate
from langflow.interface.custom.base import CustomComponent
class MyCustomChain(Chain):
"""
An example of a custom chain.
"""
class MyCustomChain(Chain):
prompt: StringPromptTemplate
"""Prompt object to use."""
llm: BaseLanguageModel
output_key: str = "text" #: :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Will be whatever keys the prompt expects.
:meta private:
"""
An example of a custom chain.
return self.prompt.input_variables
@property
def output_keys(self) -> List[str]:
"""Will always return text key.
:meta private:
"""
return [self.output_key]
prompt: StringPromptTemplate
"""Prompt object to use."""
llm: BaseLanguageModel
output_key: str = "text" #: :meta private:
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
# Your custom chain logic goes here
# This is just an example that mimics LLMChain
prompt_value = self.prompt.format_prompt(**inputs)
class Config:
"""Configuration for this pydantic object."""
# Whenever you call a language model, or another chain, you should pass
# a callback manager to it. This allows the inner run to be tracked by
# any callbacks that are registered on the outer run.
# You can always obtain a callback manager for this by calling
# `run_manager.get_child()` as shown below.
response = self.llm.generate_prompt(
[prompt_value],
callbacks=run_manager.get_child() if run_manager else None,
)
extra = Extra.forbid
arbitrary_types_allowed = True
# If you want to log something about this run, you can do so by calling
# methods on the `run_manager`, as shown below. This will trigger any
# callbacks that are registered for that event.
if run_manager:
run_manager.on_text("Log something about this run")
@property
def input_keys(self) -> List[str]:
"""Will be whatever keys the prompt expects.
return {self.output_key: response.generations[0][0].text}
:meta private:
"""
return self.prompt.input_variables
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, str]:
# Your custom chain logic goes here
# This is just an example that mimics LLMChain
prompt_value = self.prompt.format_prompt(**inputs)
@property
def output_keys(self) -> List[str]:
"""Will always return text key.
# Whenever you call a language model, or another chain, you should pass
# a callback manager to it. This allows the inner run to be tracked by
# any callbacks that are registered on the outer run.
# You can always obtain a callback manager for this by calling
# `run_manager.get_child()` as shown below.
response = await self.llm.agenerate_prompt(
[prompt_value],
callbacks=run_manager.get_child() if run_manager else None,
)
:meta private:
"""
return [self.output_key]
# If you want to log something about this run, you can do so by calling
# methods on the `run_manager`, as shown below. This will trigger any
# callbacks that are registered for that event.
if run_manager:
await run_manager.on_text("Log something about this run")
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
# Your custom chain logic goes here
# This is just an example that mimics LLMChain
prompt_value = self.prompt.format_prompt(**inputs)
return {self.output_key: response.generations[0][0].text}
# Whenever you call a language model, or another chain, you should pass
# a callback manager to it. This allows the inner run to be tracked by
# any callbacks that are registered on the outer run.
# You can always obtain a callback manager for this by calling
# `run_manager.get_child()` as shown below.
response = self.llm.generate_prompt(
[prompt_value],
callbacks=run_manager.get_child() if run_manager else None,
)
@property
def _chain_type(self) -> str:
return "my_custom_chain"
# If you want to log something about this run, you can do so by calling
# methods on the `run_manager`, as shown below. This will trigger any
# callbacks that are registered for that event.
if run_manager:
run_manager.on_text("Log something about this run")
class CustomChain(CustomComponent):
display_name: str = "Custom Chain"
field_config = {
"prompt": {"field_type": "prompt"},
"llm": {"field_type": "BaseLanguageModel"},
}
return {self.output_key: response.generations[0][0].text}
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, str]:
# Your custom chain logic goes here
# This is just an example that mimics LLMChain
prompt_value = self.prompt.format_prompt(**inputs)
# Whenever you call a language model, or another chain, you should pass
# a callback manager to it. This allows the inner run to be tracked by
# any callbacks that are registered on the outer run.
# You can always obtain a callback manager for this by calling
# `run_manager.get_child()` as shown below.
response = await self.llm.agenerate_prompt(
[prompt_value],
callbacks=run_manager.get_child() if run_manager else None,
)
# If you want to log something about this run, you can do so by calling
# methods on the `run_manager`, as shown below. This will trigger any
# callbacks that are registered for that event.
if run_manager:
await run_manager.on_text("Log something about this run")
return {self.output_key: response.generations[0][0].text}
@property
def _chain_type(self) -> str:
return "my_custom_chain"
class CustomChain(CustomComponent):
display_name: str = "Custom Chain"
field_config = {
"prompt": {"field_type": "prompt"},
"llm": {"field_type": "BaseLanguageModel"},
}
def build(self, prompt, llm, input: str) -> Document:
chain = MyCustomChain(prompt=prompt, llm=llm)
return chain(input)'''
def build(self, prompt: StringPromptTemplate, llm: BaseLanguageModel, input: str) -> Document:
chain = MyCustomChain(prompt=prompt, llm=llm)
return chain(input)
'''