Merge remote-tracking branch 'origin/dev' into feature/store
This commit is contained in:
commit
024ca2540f
5 changed files with 85 additions and 23 deletions
|
|
@ -143,7 +143,7 @@ Alternatively, click the **"Open in Cloud Shell"** button below to launch Google
|
|||
|
||||
# 🎨 Creating Flows
|
||||
|
||||
Creating flows with Langflow is easy. Simply drag sidebar components onto the canvas and connect them together to create your pipeline. Langflow provides a range of [LangChain components](https://docs.langchain.com/docs/category/components) to choose from, including LLMs, prompt serializers, agents, and chains.
|
||||
Creating flows with Langflow is easy. Simply drag sidebar components onto the canvas and connect them together to create your pipeline. Langflow provides a range of [LangChain components](https://python.langchain.com/docs/integrations/components) to choose from, including LLMs, prompt serializers, agents, and chains.
|
||||
|
||||
Explore by editing prompt parameters, link chains and agents, track an agent's thought process, and export your flow.
|
||||
|
||||
|
|
|
|||
|
|
@ -57,12 +57,7 @@ def update_frontend_node_with_template_values(frontend_node, raw_template_data):
|
|||
:param raw_template_data: A dict representing raw template data.
|
||||
:return: Updated frontend node.
|
||||
"""
|
||||
if (
|
||||
not frontend_node
|
||||
or "template" not in frontend_node
|
||||
or not raw_template_data
|
||||
or not raw_template_data.template
|
||||
):
|
||||
if not frontend_node or "template" not in frontend_node or not raw_template_data or not raw_template_data.template:
|
||||
return frontend_node
|
||||
|
||||
frontend_template = frontend_node.get("template", {})
|
||||
|
|
|
|||
|
|
@ -1,8 +1,8 @@
|
|||
from typing import Optional
|
||||
from langflow import CustomComponent
|
||||
|
||||
from langchain.embeddings import BedrockEmbeddings
|
||||
from langchain.embeddings.base import Embeddings
|
||||
from langflow import CustomComponent
|
||||
|
||||
|
||||
class AmazonBedrockEmeddingsComponent(CustomComponent):
|
||||
|
|
|
|||
71
src/backend/langflow/components/llms/AnthropicLLM.py
Normal file
71
src/backend/langflow/components/llms/AnthropicLLM.py
Normal file
|
|
@ -0,0 +1,71 @@
|
|||
from typing import Optional
|
||||
from langflow import CustomComponent
|
||||
from langchain.chat_models.anthropic import ChatAnthropic
|
||||
from langchain.llms.base import BaseLLM
|
||||
|
||||
|
||||
class AnthropicLLM(CustomComponent):
    """Langflow component exposing Anthropic's Claude chat/completion models."""

    display_name: str = "AnthropicLLM"
    description: str = "Anthropic Chat&Completion large language models."

    def build_config(self):
        """Describe the fields rendered in this component's configuration panel."""
        model_field = {
            "display_name": "Model Name",
            "options": [
                "claude-2.1",
                "claude-2.0",
                "claude-instant-1.2",
                "claude-instant-1",
                # Add more models as needed
            ],
            "info": "https://python.langchain.com/docs/integrations/chat/anthropic",
            "required": True,
            "value": "claude-2.1",
        }
        api_key_field = {
            "display_name": "Anthropic API Key",
            "required": True,
            "password": True,
            "info": "Your Anthropic API key.",
        }
        endpoint_field = {
            "display_name": "API Endpoint",
            "info": "Endpoint of the Anthropic API. Defaults to 'https://api.anthropic.com' if not specified.",
        }
        return {
            "model": model_field,
            "anthropic_api_key": api_key_field,
            "max_tokens": {
                "display_name": "Max Tokens",
                "field_type": "int",
                "value": 256,
            },
            "temperature": {
                "display_name": "Temperature",
                "field_type": "float",
                "value": 0.7,
            },
            "api_endpoint": endpoint_field,
            "code": {"show": False},
        }

    def build(
        self,
        model: str,
        anthropic_api_key: Optional[str] = None,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        api_endpoint: Optional[str] = None,
    ) -> BaseLLM:
        """Instantiate a ChatAnthropic client from the configured field values.

        Raises:
            ValueError: if the underlying client could not be constructed.
        """
        # Fall back to the public Anthropic endpoint when none was configured.
        if not api_endpoint:
            api_endpoint = "https://api.anthropic.com"

        try:
            return ChatAnthropic(
                model=model,
                anthropic_api_key=anthropic_api_key,
                max_tokens_to_sample=max_tokens,
                temperature=temperature,
                anthropic_api_url=api_endpoint,
            )
        except Exception as e:
            raise ValueError("Could not connect to Anthropic API.") from e
|
|
@ -3,6 +3,7 @@ import json
|
|||
from pathlib import Path
|
||||
from typing import Any, Coroutine, Dict, List, Optional, Tuple, Union
|
||||
|
||||
from langchain.agents import AgentExecutor
|
||||
from langchain.chains.base import Chain
|
||||
from langchain.schema import AgentAction, Document
|
||||
from langchain.vectorstores.base import VectorStore
|
||||
|
|
@ -66,7 +67,11 @@ def get_result_and_thought(langchain_object: Any, inputs: dict):
|
|||
if hasattr(langchain_object, "return_intermediate_steps"):
|
||||
langchain_object.return_intermediate_steps = False
|
||||
|
||||
fix_memory_inputs(langchain_object)
|
||||
try:
|
||||
if not isinstance(langchain_object, AgentExecutor):
|
||||
fix_memory_inputs(langchain_object)
|
||||
except Exception as exc:
|
||||
logger.error(f"Error fixing memory inputs: {exc}")
|
||||
|
||||
try:
|
||||
output = langchain_object(inputs, return_only_outputs=True)
|
||||
|
|
@ -101,18 +106,6 @@ def get_build_result(data_graph, session_id):
|
|||
return build_sorted_vertices(data_graph)
|
||||
|
||||
|
||||
def load_langchain_object(
    data_graph: Dict[str, Any], session_id: str
) -> Tuple[Union[Chain, VectorStore], Dict[str, Any], str]:
    """Build the LangChain object for ``data_graph`` and return it with its artifacts.

    Returns the built object, its artifacts dict, and the (unchanged) session id.

    Raises:
        ValueError: when the graph could not be built into a LangChain object.
    """
    built, build_artifacts = get_build_result(data_graph, session_id)
    logger.debug("Loaded LangChain object")

    if built is None:
        raise ValueError("There was an error loading the langchain_object. Please, check all the nodes and try again.")

    return built, build_artifacts, session_id
|
||||
|
||||
|
||||
def process_inputs(inputs: Optional[dict], artifacts: Dict[str, Any]) -> dict:
|
||||
if inputs is None:
|
||||
inputs = {}
|
||||
|
|
@ -162,9 +155,12 @@ async def process_graph_cached(
|
|||
if session_id is None:
|
||||
session_id = session_service.generate_key(session_id=session_id, data_graph=data_graph)
|
||||
# Load the graph using SessionService
|
||||
graph, artifacts = await session_service.load_session(session_id, data_graph)
|
||||
session = await session_service.load_session(session_id, data_graph)
|
||||
graph, artifacts = session if session else (None, None)
|
||||
if not graph:
|
||||
raise ValueError("Graph not found in the session")
|
||||
built_object = await graph.build()
|
||||
processed_inputs = process_inputs(inputs, artifacts)
|
||||
processed_inputs = process_inputs(inputs, artifacts or {})
|
||||
result = generate_result(built_object, processed_inputs)
|
||||
# langchain_object is now updated with the new memory
|
||||
# we need to update the cache with the updated langchain_object
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue