From f2e3f2ccc584b35a986f63d6b459f2df65bb60e9 Mon Sep 17 00:00:00 2001
From: Jordan Frazier <122494242+jordanrfrazier@users.noreply.github.com>
Date: Mon, 9 Jun 2025 08:20:34 -0700
Subject: [PATCH] ref: standardize imports to use fully qualified names (#7575)
* Remove models from __init__.py to avoid circular dependency
* [autofix.ci] apply automated fixes
* [autofix.ci] apply automated fixes (attempt 2/3)
* add back init to avoid backwards-compat issues
* [autofix.ci] apply automated fixes
* ref: standardize imports to use fully qualified names
Replaces uses of the "public" re-export APIs with fully qualified
module paths to reduce instances of circular dependencies (see the
sketch after this list)
* [autofix.ci] apply automated fixes
* [autofix.ci] apply automated fixes (attempt 2/3)
* Fix path
* update starters"
"
* remove old files'
* [autofix.ci] apply automated fixes
* Fix some imports
* [autofix.ci] apply automated fixes
* ruff
* [autofix.ci] apply automated fixes
* starter projects?
* starter projects?
* [autofix.ci] apply automated fixes
* [autofix.ci] apply automated fixes
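A minimal sketch of the pattern, using two imports that recur
throughout the hunks below (illustrative only; see the actual diffs
for the full set of rewrites):

    # before: import through the package-level re-export
    from langflow.schema import Data
    from langflow.custom import Component

    # after: import from the module that actually defines the symbol
    from langflow.schema.data import Data
    from langflow.custom.custom_component.component import Component

Importing from the defining module avoids pulling in the package
__init__.py re-exports at import time, which is where the circular
imports mentioned above tended to originate.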
---------
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
---
src/backend/base/langflow/__main__.py | 2 +-
src/backend/base/langflow/api/build.py | 2 +-
.../base/langflow/api/health_check_router.py | 2 +-
src/backend/base/langflow/api/utils.py | 6 +-
src/backend/base/langflow/api/v1/endpoints.py | 3 +-
src/backend/base/langflow/api/v1/files.py | 2 +-
src/backend/base/langflow/api/v1/flows.py | 10 +-
src/backend/base/langflow/api/v1/mcp.py | 3 +-
src/backend/base/langflow/api/v1/schemas.py | 6 +-
src/backend/base/langflow/api/v1/users.py | 2 +-
src/backend/base/langflow/api/v1/variable.py | 2 +-
.../base/langflow/api/v1/voice_mode.py | 3 +-
src/backend/base/langflow/api/v2/files.py | 2 +-
.../base/langflow/base/agents/agent.py | 7 +-
.../base/langflow/base/agents/crewai/crew.py | 2 +-
.../base/langflow/base/agents/utils.py | 2 +-
.../base/langflow/base/chains/model.py | 4 +-
.../langflow/base/composio/composio_base.py | 4 +-
.../base/langflow/base/compressors/model.py | 4 +-
.../base/langflow/base/data/base_file.py | 4 +-
src/backend/base/langflow/base/data/utils.py | 2 +-
.../base/document_transformers/model.py | 4 +-
.../base/langflow/base/embeddings/model.py | 2 +-
.../langflow/base/flow_processing/utils.py | 2 +-
src/backend/base/langflow/base/io/chat.py | 2 +-
src/backend/base/langflow/base/io/text.py | 2 +-
.../base/langchain_utilities/model.py | 5 +-
src/backend/base/langflow/base/mcp/util.py | 2 +-
.../base/langflow/base/memory/memory.py | 4 +-
.../base/langflow/base/memory/model.py | 4 +-
.../base/langflow/base/models/model.py | 5 +-
.../base/langflow/base/prompts/utils.py | 2 +-
.../base/langflow/base/tools/run_flow.py | 8 +-
.../base/langflow/base/vectorstores/model.py | 5 +-
.../base/langflow/base/vectorstores/utils.py | 2 +-
.../components/Notion/add_content_to_page.py | 4 +-
.../langflow/components/Notion/create_page.py | 4 +-
.../Notion/list_database_properties.py | 4 +-
.../langflow/components/Notion/list_pages.py | 4 +-
.../langflow/components/Notion/list_users.py | 4 +-
.../components/Notion/page_content_viewer.py | 4 +-
.../base/langflow/components/Notion/search.py | 4 +-
.../components/Notion/update_page_property.py | 4 +-
.../components/agentql/agentql_api.py | 4 +-
.../base/langflow/components/agents/agent.py | 2 +-
.../amazon/amazon_bedrock_embedding.py | 2 +-
.../components/amazon/amazon_bedrock_model.py | 2 +-
.../components/amazon/s3_bucket_uploader.py | 2 +-
.../langflow/components/apify/apify_actor.py | 4 +-
.../assemblyai/assemblyai_get_subtitles.py | 4 +-
.../components/assemblyai/assemblyai_lemur.py | 4 +-
.../assemblyai/assemblyai_list_transcripts.py | 4 +-
.../assemblyai/assemblyai_poll_transcript.py | 4 +-
.../assemblyai/assemblyai_start_transcript.py | 4 +-
.../components/composio/composio_api.py | 2 +-
.../components/composio/gmail_composio.py | 2 +-
.../components/confluence/confluence.py | 4 +-
.../base/langflow/components/crewai/crewai.py | 2 +-
.../components/crewai/hierarchical_task.py | 2 +-
.../components/crewai/sequential_task.py | 2 +-
.../crewai/sequential_task_agent.py | 2 +-
.../custom_component/custom_component.py | 4 +-
.../langflow/components/data/api_request.py | 4 +-
.../langflow/components/data/csv_to_data.py | 4 +-
.../langflow/components/data/directory.py | 6 +-
.../base/langflow/components/data/file.py | 4 +-
.../langflow/components/data/json_to_data.py | 4 +-
.../langflow/components/data/mcp_component.py | 7 +-
.../base/langflow/components/data/url.py | 5 +-
.../base/langflow/components/data/webhook.py | 4 +-
.../datastax/astra_assistant_manager.py | 4 +-
.../components/datastax/astradb_cql.py | 2 +-
.../components/datastax/astradb_tool.py | 2 +-
.../components/datastax/create_assistant.py | 4 +-
.../components/datastax/create_thread.py | 4 +-
.../langflow/components/datastax/dotenv.py | 6 +-
.../components/datastax/get_assistant.py | 4 +-
.../langflow/components/datastax/getenvvar.py | 6 +-
.../base/langflow/components/datastax/run.py | 6 +-
.../deactivated/code_block_extractor.py | 2 +-
.../deactivated/documents_to_data.py | 4 +-
.../langflow/components/deactivated/embed.py | 4 +-
.../deactivated/extract_key_from_data.py | 4 +-
.../components/deactivated/list_flows.py | 4 +-
.../components/deactivated/mcp_sse.py | 2 +-
.../components/deactivated/mcp_stdio.py | 2 +-
.../components/deactivated/merge_data.py | 4 +-
.../components/deactivated/message.py | 2 +-
.../deactivated/selective_passthrough.py | 2 +-
.../components/deactivated/should_run_next.py | 2 +-
.../components/deactivated/split_text.py | 4 +-
.../components/deactivated/store_message.py | 2 +-
.../components/deactivated/sub_flow.py | 4 +-
.../components/embeddings/astra_vectorize.py | 2 +-
.../embeddings/google_generative_ai.py | 2 +-
.../components/embeddings/similarity.py | 4 +-
.../components/embeddings/text_embedder.py | 4 +-
.../firecrawl/firecrawl_crawl_api.py | 4 +-
.../firecrawl/firecrawl_extract_api.py | 4 +-
.../components/firecrawl/firecrawl_map_api.py | 4 +-
.../firecrawl/firecrawl_scrape_api.py | 4 +-
.../base/langflow/components/git/git.py | 4 +-
.../langflow/components/git/gitextractor.py | 4 +-
.../base/langflow/components/google/gmail.py | 8 +-
.../components/google/google_drive.py | 8 +-
.../components/google/google_drive_search.py | 8 +-
.../components/google/google_oauth_token.py | 4 +-
.../components/helpers/calculator_core.py | 6 +-
.../components/helpers/create_list.py | 8 +-
.../components/helpers/current_date.py | 2 +-
.../components/helpers/id_generator.py | 4 +-
.../langflow/components/helpers/memory.py | 9 +-
.../components/helpers/store_message.py | 10 +-
.../homeassistant/home_assistant_control.py | 4 +-
.../list_home_assistant_states.py | 4 +-
.../icosacomputing/combinatorial_reasoner.py | 6 +-
.../langflow/components/input_output/chat.py | 2 +-
.../components/input_output/chat_output.py | 5 +-
.../langchain_utilities/character.py | 2 +-
.../langchain_utilities/conversation.py | 2 +-
.../langchain_utilities/csv_agent.py | 9 +-
.../html_link_extractor.py | 2 +-
.../langchain_utilities/json_agent.py | 2 +-
.../json_document_builder.py | 2 +-
.../langchain_utilities/langchain_hub.py | 4 +-
.../langchain_utilities/language_recursive.py | 2 +-
.../langchain_utilities/language_semantic.py | 2 +-
.../langchain_utilities/llm_checker.py | 2 +-
.../langchain_utilities/llm_math.py | 4 +-
.../langchain_utilities/natural_language.py | 2 +-
.../langchain_utilities/openai_tools.py | 9 +-
.../components/langchain_utilities/openapi.py | 2 +-
.../langchain_utilities/retrieval_qa.py | 2 +-
.../langchain_utilities/retriever.py | 2 +-
.../langchain_utilities/runnable_executor.py | 6 +-
.../langchain_utilities/self_query.py | 6 +-
.../components/langchain_utilities/spider.py | 4 +-
.../components/langchain_utilities/sql.py | 2 +-
.../langchain_utilities/sql_database.py | 2 +-
.../langchain_utilities/sql_generator.py | 4 +-
.../langchain_utilities/tool_calling.py | 9 +-
.../langchain_utilities/vector_store.py | 2 +-
.../langchain_utilities/vector_store_info.py | 6 +-
.../vector_store_router.py | 2 +-
.../langchain_utilities/xml_agent.py | 9 +-
.../components/languagemodels/aiml.py | 2 +-
.../components/languagemodels/azure_openai.py | 2 +-
.../components/languagemodels/deepseek.py | 2 +-
.../languagemodels/google_generative_ai.py | 12 ++-
.../languagemodels/lmstudiomodel.py | 2 +-
.../components/languagemodels/maritalk.py | 2 +-
.../components/languagemodels/novita.py | 4 +-
.../components/languagemodels/nvidia.py | 2 +-
.../languagemodels/openai_chat_model.py | 2 +-
.../components/languagemodels/openrouter.py | 2 +-
.../components/languagemodels/vertexai.py | 2 +-
.../components/languagemodels/watsonx.py | 2 +-
.../langflow/components/languagemodels/xai.py | 10 +-
.../components/langwatch/langwatch.py | 4 +-
.../components/logic/conditional_router.py | 2 +-
.../logic/data_conditional_router.py | 5 +-
.../langflow/components/logic/flow_tool.py | 2 +-
.../base/langflow/components/logic/listen.py | 4 +-
.../base/langflow/components/logic/loop.py | 7 +-
.../base/langflow/components/logic/notify.py | 4 +-
.../langflow/components/logic/pass_message.py | 4 +-
.../langflow/components/logic/run_flow.py | 2 +-
.../langflow/components/logic/sub_flow.py | 5 +-
.../langflow/components/memories/astra_db.py | 2 +-
.../langflow/components/memories/cassandra.py | 2 +-
.../components/memories/mem0_chat_memory.py | 4 +-
.../langflow/components/memories/redis.py | 2 +-
.../base/langflow/components/memories/zep.py | 2 +-
.../components/nvidia/nvidia_ingest.py | 12 +--
.../base/langflow/components/olivya/olivya.py | 4 +-
.../components/processing/alter_metadata.py | 7 +-
.../components/processing/batch_run.py | 4 +-
.../components/processing/combine_text.py | 2 +-
.../components/processing/create_data.py | 4 +-
.../processing/data_to_dataframe.py | 5 +-
.../processing/dataframe_operations.py | 4 +-
.../components/processing/extract_key.py | 4 +-
.../components/processing/filter_data.py | 4 +-
.../processing/filter_data_values.py | 4 +-
.../components/processing/json_cleaner.py | 6 +-
.../components/processing/lambda_filter.py | 4 +-
.../components/processing/llm_router.py | 7 +-
.../components/processing/merge_data.py | 4 +-
.../components/processing/message_to_data.py | 4 +-
.../components/processing/parse_data.py | 4 +-
.../components/processing/parse_dataframe.py | 2 +-
.../components/processing/parse_json_data.py | 6 +-
.../langflow/components/processing/parser.py | 15 +--
.../components/processing/python_repl_core.py | 4 +-
.../langflow/components/processing/regex.py | 4 +-
.../components/processing/select_data.py | 4 +-
.../components/processing/split_text.py | 5 +-
.../processing/structured_output.py | 2 +-
.../components/processing/update_data.py | 4 +-
.../langflow/components/prompts/prompt.py | 2 +-
.../components/prototypes/python_function.py | 5 +-
.../components/retrievers/amazon_kendra.py | 2 +-
.../langflow/components/retrievers/metal.py | 2 +-
.../components/retrievers/multi_query.py | 2 +-
.../scrapegraph_markdownify_api.py | 4 +-
.../scrapegraph/scrapegraph_search_api.py | 4 +-
.../scrapegraph_smart_scraper_api.py | 4 +-
.../base/langflow/components/search/arxiv.py | 5 +-
.../components/search/bing_search_api.py | 7 +-
.../search/duck_duck_go_search_run.py | 9 +-
.../langflow/components/search/exa_search.py | 2 +-
.../components/search/glean_search_api.py | 5 +-
.../search/google_search_api_core.py | 4 +-
.../search/google_serper_api_core.py | 4 +-
.../base/langflow/components/search/search.py | 7 +-
.../base/langflow/components/search/serp.py | 6 +-
.../langflow/components/search/wikidata.py | 8 +-
.../langflow/components/search/wikipedia.py | 7 +-
.../components/search/wolfram_alpha_api.py | 5 +-
.../base/langflow/components/search/yahoo.py | 7 +-
.../components/tavily/tavily_search.py | 7 +-
.../langflow/components/tools/calculator.py | 4 +-
.../components/tools/google_search_api.py | 4 +-
.../components/tools/google_serper_api.py | 4 +-
.../tools/python_code_structured_tool.py | 2 +-
.../langflow/components/tools/python_repl.py | 4 +-
.../langflow/components/tools/search_api.py | 4 +-
.../base/langflow/components/tools/searxng.py | 2 +-
.../langflow/components/tools/serp_api.py | 4 +-
.../components/tools/tavily_search_tool.py | 4 +-
.../langflow/components/tools/wikidata_api.py | 4 +-
.../components/tools/wikipedia_api.py | 4 +-
.../components/tools/yahoo_finance.py | 4 +-
.../components/unstructured/unstructured.py | 6 +-
.../components/vectorstores/astradb.py | 6 +-
.../components/vectorstores/astradb_graph.py | 6 +-
.../components/vectorstores/cassandra.py | 4 +-
.../vectorstores/cassandra_graph.py | 4 +-
.../components/vectorstores/chroma.py | 8 +-
.../components/vectorstores/clickhouse.py | 4 +-
.../components/vectorstores/couchbase.py | 2 +-
.../components/vectorstores/elasticsearch.py | 2 +-
.../langflow/components/vectorstores/faiss.py | 2 +-
.../components/vectorstores/graph_rag.py | 6 +-
.../langflow/components/vectorstores/hcd.py | 6 +-
.../components/vectorstores/local_db.py | 6 +-
.../components/vectorstores/milvus.py | 2 +-
.../components/vectorstores/mongodb_atlas.py | 2 +-
.../components/vectorstores/opensearch.py | 2 +-
.../components/vectorstores/pgvector.py | 2 +-
.../components/vectorstores/pinecone.py | 2 +-
.../components/vectorstores/qdrant.py | 2 +-
.../langflow/components/vectorstores/redis.py | 2 +-
.../components/vectorstores/supabase.py | 2 +-
.../components/vectorstores/upstash.py | 2 +-
.../components/vectorstores/vectara.py | 4 +-
.../components/vectorstores/vectara_rag.py | 2 +-
.../vectorstores/vectara_self_query.py | 2 +-
.../components/vectorstores/weaviate.py | 2 +-
.../langflow/components/youtube/channel.py | 8 +-
.../langflow/components/youtube/comments.py | 8 +-
.../langflow/components/youtube/playlist.py | 9 +-
.../langflow/components/youtube/search.py | 8 +-
.../langflow/components/youtube/trending.py | 8 +-
.../components/youtube/video_details.py | 8 +-
.../components/youtube/youtube_transcripts.py | 10 +-
.../custom/custom_component/base_component.py | 2 +-
.../custom_component/component_with_cache.py | 2 +-
.../custom_component/custom_component.py | 2 +-
.../directory_reader/directory_reader.py | 2 +-
.../langflow/custom/directory_reader/utils.py | 2 +-
src/backend/base/langflow/custom/eval.py | 2 +-
src/backend/base/langflow/custom/utils.py | 4 +-
src/backend/base/langflow/graph/graph/base.py | 2 +-
src/backend/base/langflow/graph/schema.py | 2 +-
src/backend/base/langflow/graph/utils.py | 2 +-
.../base/langflow/graph/vertex/base.py | 2 +-
.../langflow/graph/vertex/vertex_types.py | 4 +-
src/backend/base/langflow/helpers/data.py | 3 +-
src/backend/base/langflow/helpers/flow.py | 5 +-
.../Basic Prompt Chaining.json | 16 +--
.../starter_projects/Basic Prompting.json | 8 +-
.../starter_projects/Blog Writer.json | 10 +-
.../Custom Component Maker.json | 8 +-
.../starter_projects/Diet Analysis.json | 8 +-
.../starter_projects/Document Q&A.json | 10 +-
.../starter_projects/Financial Agent.json | 16 +--
.../Financial Report Parser.json | 10 +-
.../starter_projects/Gmail Agent.json | 8 +-
.../starter_projects/Hybrid Search RAG.json | 14 +--
.../Image Sentiment Analysis.json | 10 +-
.../Instagram Copywriter.json | 18 ++--
.../starter_projects/Invoice Summarizer.json | 8 +-
.../starter_projects/Market Research.json | 12 +--
.../starter_projects/Meeting Summary.json | 24 ++---
.../starter_projects/Memory Chatbot.json | 10 +-
.../starter_projects/News Aggregator.json | 8 +-
.../starter_projects/Pokédex Agent.json | 8 +-
.../Portfolio Website Code Generator.json | 8 +-
.../starter_projects/Price Deal Finder.json | 10 +-
.../starter_projects/Research Agent.json | 20 ++--
.../Research Translation Loop.json | 12 +--
.../SEO Keyword Generator.json | 8 +-
.../starter_projects/SaaS Pricing.json | 8 +-
.../starter_projects/Search agent.json | 8 +-
.../Sequential Tasks Agents.json | 22 ++---
.../starter_projects/Simple Agent.json | 8 +-
.../starter_projects/Social Media Agent.json | 10 +-
.../Text Sentiment Analysis.json | 18 ++--
.../Travel Planning Agents.json | 14 +--
.../Twitter Thread Generator.json | 8 +-
.../starter_projects/Vector Store RAG.json | 16 +--
.../starter_projects/Youtube Analysis.json | 18 ++--
.../langflow/interface/initialize/loading.py | 5 +-
src/backend/base/langflow/load/load.py | 2 +-
.../services/database/models/api_key/crud.py | 4 +-
.../services/database/models/api_key/model.py | 2 +-
.../services/database/models/flow/model.py | 6 +-
.../services/database/models/user/model.py | 8 +-
.../shared_component_cache/service.py | 2 +-
.../base/langflow/utils/data_structure.py | 2 +-
src/backend/base/langflow/utils/util.py | 2 +-
.../unit/base/tools/test_component_toolkit.py | 2 +-
.../languagemodels/test_deepseek.py | 2 +-
.../components/languagemodels/test_xai.py | 4 +-
.../models/test_language_model_component.py | 2 +-
.../tests/unit/graph/edge/test_edge_base.py | 2 +-
.../tests/unit/graph/graph/test_cycles.py | 4 +-
.../graph/graph/test_graph_state_model.py | 2 +-
.../starter_projects/test_memory_chatbot.py | 2 +-
.../starter_projects/test_vector_store_rag.py | 2 +-
uv.lock | 99 ++++++++++---------
332 files changed, 863 insertions(+), 803 deletions(-)
diff --git a/src/backend/base/langflow/__main__.py b/src/backend/base/langflow/__main__.py
index 381628bc3..30da0aa65 100644
--- a/src/backend/base/langflow/__main__.py
+++ b/src/backend/base/langflow/__main__.py
@@ -596,8 +596,8 @@ def api_key(
"Default superuser not found. This command requires a superuser and AUTO_LOGIN to be enabled."
)
return None
- from langflow.services.database.models.api_key import ApiKey, ApiKeyCreate
from langflow.services.database.models.api_key.crud import create_api_key, delete_api_key
+ from langflow.services.database.models.api_key.model import ApiKey, ApiKeyCreate
stmt = select(ApiKey).where(ApiKey.user_id == superuser.id)
api_key = (await session.exec(stmt)).first()
diff --git a/src/backend/base/langflow/api/build.py b/src/backend/base/langflow/api/build.py
index 9ae6f8bcb..9e3e92e92 100644
--- a/src/backend/base/langflow/api/build.py
+++ b/src/backend/base/langflow/api/build.py
@@ -32,7 +32,7 @@ from langflow.graph.graph.base import Graph
from langflow.graph.utils import log_vertex_build
from langflow.schema.message import ErrorMessage
from langflow.schema.schema import OutputValue
-from langflow.services.database.models.flow import Flow
+from langflow.services.database.models.flow.model import Flow
from langflow.services.deps import get_chat_service, get_telemetry_service, session_scope
from langflow.services.job_queue.service import JobQueueNotFoundError, JobQueueService
from langflow.services.telemetry.schema import ComponentPayload, PlaygroundPayload
diff --git a/src/backend/base/langflow/api/health_check_router.py b/src/backend/base/langflow/api/health_check_router.py
index 3edeb08c0..6c5316cd9 100644
--- a/src/backend/base/langflow/api/health_check_router.py
+++ b/src/backend/base/langflow/api/health_check_router.py
@@ -6,7 +6,7 @@ from pydantic import BaseModel
from sqlmodel import select
from langflow.api.utils import DbSession
-from langflow.services.database.models.flow import Flow
+from langflow.services.database.models.flow.model import Flow
from langflow.services.deps import get_chat_service
health_check_router = APIRouter(tags=["Health Check"])
diff --git a/src/backend/base/langflow/api/utils.py b/src/backend/base/langflow/api/utils.py
index a87bd2e81..d78edd6cd 100644
--- a/src/backend/base/langflow/api/utils.py
+++ b/src/backend/base/langflow/api/utils.py
@@ -14,10 +14,10 @@ from sqlmodel.ext.asyncio.session import AsyncSession
from langflow.graph.graph.base import Graph
from langflow.services.auth.utils import get_current_active_user
-from langflow.services.database.models import User
-from langflow.services.database.models.flow import Flow
-from langflow.services.database.models.message import MessageTable
+from langflow.services.database.models.flow.model import Flow
+from langflow.services.database.models.message.model import MessageTable
from langflow.services.database.models.transactions.model import TransactionTable
+from langflow.services.database.models.user.model import User
from langflow.services.database.models.vertex_builds.model import VertexBuildTable
from langflow.services.deps import get_session, session_scope
from langflow.services.store.utils import get_lf_version_from_pypi
diff --git a/src/backend/base/langflow/api/v1/endpoints.py b/src/backend/base/langflow/api/v1/endpoints.py
index ba6cf0834..7c6d9caca 100644
--- a/src/backend/base/langflow/api/v1/endpoints.py
+++ b/src/backend/base/langflow/api/v1/endpoints.py
@@ -40,8 +40,7 @@ from langflow.processing.process import process_tweaks, run_graph_internal
from langflow.schema.graph import Tweaks
from langflow.services.auth.utils import api_key_security, get_current_active_user
from langflow.services.cache.utils import save_uploaded_file
-from langflow.services.database.models.flow import Flow
-from langflow.services.database.models.flow.model import FlowRead
+from langflow.services.database.models.flow.model import Flow, FlowRead
from langflow.services.database.models.flow.utils import get_all_webhook_components_in_flow
from langflow.services.database.models.user.model import User, UserRead
from langflow.services.deps import get_session_service, get_settings_service, get_telemetry_service
diff --git a/src/backend/base/langflow/api/v1/files.py b/src/backend/base/langflow/api/v1/files.py
index 041329492..6909d87f5 100644
--- a/src/backend/base/langflow/api/v1/files.py
+++ b/src/backend/base/langflow/api/v1/files.py
@@ -11,7 +11,7 @@ from fastapi.responses import StreamingResponse
from langflow.api.utils import CurrentActiveUser, DbSession
from langflow.api.v1.schemas import UploadFileResponse
-from langflow.services.database.models.flow import Flow
+from langflow.services.database.models.flow.model import Flow
from langflow.services.deps import get_settings_service, get_storage_service
from langflow.services.settings.service import SettingsService
from langflow.services.storage.service import StorageService
diff --git a/src/backend/base/langflow/api/v1/flows.py b/src/backend/base/langflow/api/v1/flows.py
index 6c9a45b81..eb7055522 100644
--- a/src/backend/base/langflow/api/v1/flows.py
+++ b/src/backend/base/langflow/api/v1/flows.py
@@ -24,8 +24,14 @@ from langflow.api.v1.schemas import FlowListCreate
from langflow.helpers.user import get_user_by_flow_id_or_endpoint_name
from langflow.initial_setup.constants import STARTER_FOLDER_NAME
from langflow.logging import logger
-from langflow.services.database.models.flow import Flow, FlowCreate, FlowRead, FlowUpdate
-from langflow.services.database.models.flow.model import AccessTypeEnum, FlowHeader
+from langflow.services.database.models.flow.model import (
+ AccessTypeEnum,
+ Flow,
+ FlowCreate,
+ FlowHeader,
+ FlowRead,
+ FlowUpdate,
+)
from langflow.services.database.models.flow.utils import get_webhook_component_in_flow
from langflow.services.database.models.folder.constants import DEFAULT_FOLDER_NAME
from langflow.services.database.models.folder.model import Folder
diff --git a/src/backend/base/langflow/api/v1/mcp.py b/src/backend/base/langflow/api/v1/mcp.py
index 64d7135a4..d7029fcf6 100644
--- a/src/backend/base/langflow/api/v1/mcp.py
+++ b/src/backend/base/langflow/api/v1/mcp.py
@@ -23,7 +23,8 @@ from langflow.base.mcp.util import get_flow_snake_case
from langflow.helpers.flow import json_schema_from_flow
from langflow.schema.message import Message
from langflow.services.auth.utils import get_current_active_user
-from langflow.services.database.models import Flow, User
+from langflow.services.database.models.flow.model import Flow
+from langflow.services.database.models.user.model import User
from langflow.services.deps import (
get_db_service,
get_settings_service,
diff --git a/src/backend/base/langflow/api/v1/schemas.py b/src/backend/base/langflow/api/v1/schemas.py
index 24df41e7a..1bef87e9d 100644
--- a/src/backend/base/langflow/api/v1/schemas.py
+++ b/src/backend/base/langflow/api/v1/schemas.py
@@ -14,7 +14,7 @@ from pydantic import (
)
from langflow.graph.schema import RunOutputs
-from langflow.schema import dotdict
+from langflow.schema.dotdict import dotdict
from langflow.schema.graph import Tweaks
from langflow.schema.schema import InputType, OutputType, OutputValue
from langflow.serialization import constants as serialization_constants
@@ -22,8 +22,8 @@ from langflow.serialization.constants import MAX_ITEMS_LENGTH, MAX_TEXT_LENGTH
from langflow.serialization.serialization import serialize
from langflow.services.database.models.api_key.model import ApiKeyRead
from langflow.services.database.models.base import orjson_dumps
-from langflow.services.database.models.flow import FlowCreate, FlowRead
-from langflow.services.database.models.user import UserRead
+from langflow.services.database.models.flow.model import FlowCreate, FlowRead
+from langflow.services.database.models.user.model import UserRead
from langflow.services.settings.feature_flags import FeatureFlags
from langflow.services.tracing.schema import Log
diff --git a/src/backend/base/langflow/api/v1/users.py b/src/backend/base/langflow/api/v1/users.py
index 7bfa96c78..d25073894 100644
--- a/src/backend/base/langflow/api/v1/users.py
+++ b/src/backend/base/langflow/api/v1/users.py
@@ -15,8 +15,8 @@ from langflow.services.auth.utils import (
get_password_hash,
verify_password,
)
-from langflow.services.database.models.user import User, UserCreate, UserRead, UserUpdate
from langflow.services.database.models.user.crud import get_user_by_id, update_user
+from langflow.services.database.models.user.model import User, UserCreate, UserRead, UserUpdate
from langflow.services.deps import get_settings_service
router = APIRouter(tags=["Users"], prefix="/users")
diff --git a/src/backend/base/langflow/api/v1/variable.py b/src/backend/base/langflow/api/v1/variable.py
index c0f05bfe1..445f5402a 100644
--- a/src/backend/base/langflow/api/v1/variable.py
+++ b/src/backend/base/langflow/api/v1/variable.py
@@ -4,7 +4,7 @@ from fastapi import APIRouter, HTTPException
from sqlalchemy.exc import NoResultFound
from langflow.api.utils import CurrentActiveUser, DbSession
-from langflow.services.database.models.variable import VariableCreate, VariableRead, VariableUpdate
+from langflow.services.database.models.variable.model import VariableCreate, VariableRead, VariableUpdate
from langflow.services.deps import get_variable_service
from langflow.services.variable.constants import CREDENTIAL_TYPE
from langflow.services.variable.service import DatabaseVariableService
diff --git a/src/backend/base/langflow/api/v1/voice_mode.py b/src/backend/base/langflow/api/v1/voice_mode.py
index a96d45138..262c1237c 100644
--- a/src/backend/base/langflow/api/v1/voice_mode.py
+++ b/src/backend/base/langflow/api/v1/voice_mode.py
@@ -30,8 +30,9 @@ from langflow.logging import logger
from langflow.memory import aadd_messagetables
from langflow.schema.properties import Properties
from langflow.services.auth.utils import get_current_user_for_websocket
-from langflow.services.database.models import MessageTable, User
from langflow.services.database.models.flow.model import Flow
+from langflow.services.database.models.message.model import MessageTable
+from langflow.services.database.models.user.model import User
from langflow.services.deps import get_variable_service, session_scope
from langflow.utils.voice_utils import (
BYTES_PER_24K_FRAME,
diff --git a/src/backend/base/langflow/api/v2/files.py b/src/backend/base/langflow/api/v2/files.py
index f24d7b96a..383f1ec11 100644
--- a/src/backend/base/langflow/api/v2/files.py
+++ b/src/backend/base/langflow/api/v2/files.py
@@ -15,7 +15,7 @@ from sqlmodel import String, cast, col, select
from langflow.api.schemas import UploadFileResponse
from langflow.api.utils import CurrentActiveUser, DbSession
-from langflow.services.database.models.file import File as UserFile
+from langflow.services.database.models.file.model import File as UserFile
from langflow.services.deps import get_settings_service, get_storage_service
from langflow.services.storage.service import StorageService
diff --git a/src/backend/base/langflow/base/agents/agent.py b/src/backend/base/langflow/base/agents/agent.py
index 84eae4e38..a4dcbfeca 100644
--- a/src/backend/base/langflow/base/agents/agent.py
+++ b/src/backend/base/langflow/base/agents/agent.py
@@ -9,17 +9,16 @@ from langchain_core.runnables import Runnable
from langflow.base.agents.callback import AgentAsyncHandler
from langflow.base.agents.events import ExceptionWithMessageError, process_agent_events
from langflow.base.agents.utils import data_to_messages
-from langflow.custom import Component
-from langflow.custom.custom_component.component import _get_component_toolkit
+from langflow.custom.custom_component.component import Component, _get_component_toolkit
from langflow.field_typing import Tool
from langflow.inputs.inputs import InputTypes, MultilineInput
from langflow.io import BoolInput, HandleInput, IntInput, MessageTextInput
from langflow.logging import logger
from langflow.memory import delete_message
-from langflow.schema import Data
from langflow.schema.content_block import ContentBlock
+from langflow.schema.data import Data
from langflow.schema.message import Message
-from langflow.template import Output
+from langflow.template.field.base import Output
from langflow.utils.constants import MESSAGE_SENDER_AI
if TYPE_CHECKING:
diff --git a/src/backend/base/langflow/base/agents/crewai/crew.py b/src/backend/base/langflow/base/agents/crewai/crew.py
index 7e97ce534..d73756d1e 100644
--- a/src/backend/base/langflow/base/agents/crewai/crew.py
+++ b/src/backend/base/langflow/base/agents/crewai/crew.py
@@ -8,7 +8,7 @@ from crewai.tools.base_tool import Tool
from langchain_core.agents import AgentAction, AgentFinish
from pydantic import SecretStr
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.inputs.inputs import HandleInput, InputTypes
from langflow.io import BoolInput, IntInput, Output
from langflow.schema.data import Data
diff --git a/src/backend/base/langflow/base/agents/utils.py b/src/backend/base/langflow/base/agents/utils.py
index 29c0e52b0..0dc345b71 100644
--- a/src/backend/base/langflow/base/agents/utils.py
+++ b/src/backend/base/langflow/base/agents/utils.py
@@ -14,7 +14,7 @@ from langchain_core.prompts import BasePromptTemplate, ChatPromptTemplate
from langchain_core.tools import BaseTool
from pydantic import BaseModel
-from langflow.schema import Data
+from langflow.schema.data import Data
from .default_prompts import XML_AGENT_PROMPT
diff --git a/src/backend/base/langflow/base/chains/model.py b/src/backend/base/langflow/base/chains/model.py
index 4f9d190f5..18ff26a81 100644
--- a/src/backend/base/langflow/base/chains/model.py
+++ b/src/backend/base/langflow/base/chains/model.py
@@ -1,5 +1,5 @@
-from langflow.custom import Component
-from langflow.template import Output
+from langflow.custom.custom_component.component import Component
+from langflow.template.field.base import Output
class LCChainComponent(Component):
diff --git a/src/backend/base/langflow/base/composio/composio_base.py b/src/backend/base/langflow/base/composio/composio_base.py
index 40f555460..c1bf2d546 100644
--- a/src/backend/base/langflow/base/composio/composio_base.py
+++ b/src/backend/base/langflow/base/composio/composio_base.py
@@ -8,8 +8,8 @@ from composio.exceptions import ApiKeyError
from composio_langchain import ComposioToolSet
from langchain_core.tools import Tool
-from langflow.custom import Component
-from langflow.inputs import (
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import (
AuthInput,
MessageTextInput,
SecretStrInput,
diff --git a/src/backend/base/langflow/base/compressors/model.py b/src/backend/base/langflow/base/compressors/model.py
index fb52e641a..f780e884b 100644
--- a/src/backend/base/langflow/base/compressors/model.py
+++ b/src/backend/base/langflow/base/compressors/model.py
@@ -1,9 +1,9 @@
from abc import abstractmethod
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.field_typing import BaseDocumentCompressor
from langflow.io import DataInput, IntInput, MultilineInput
-from langflow.schema import Data
+from langflow.schema.data import Data
from langflow.schema.dataframe import DataFrame
from langflow.template.field.base import Output
diff --git a/src/backend/base/langflow/base/data/base_file.py b/src/backend/base/langflow/base/data/base_file.py
index e74dfe29c..0845de3d3 100644
--- a/src/backend/base/langflow/base/data/base_file.py
+++ b/src/backend/base/langflow/base/data/base_file.py
@@ -7,9 +7,9 @@ from zipfile import ZipFile, is_zipfile
import pandas as pd
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import BoolInput, FileInput, HandleInput, Output, StrInput
-from langflow.schema import Data
+from langflow.schema.data import Data
from langflow.schema.dataframe import DataFrame
from langflow.schema.message import Message
diff --git a/src/backend/base/langflow/base/data/utils.py b/src/backend/base/langflow/base/data/utils.py
index 4438d54bc..72bccd829 100644
--- a/src/backend/base/langflow/base/data/utils.py
+++ b/src/backend/base/langflow/base/data/utils.py
@@ -8,7 +8,7 @@ import orjson
import yaml
from defusedxml import ElementTree
-from langflow.schema import Data
+from langflow.schema.data import Data
# Types of files that can be read simply by file.read()
# and have 100% to be completely readable
diff --git a/src/backend/base/langflow/base/document_transformers/model.py b/src/backend/base/langflow/base/document_transformers/model.py
index f4ecb30c6..df9f2aaa9 100644
--- a/src/backend/base/langflow/base/document_transformers/model.py
+++ b/src/backend/base/langflow/base/document_transformers/model.py
@@ -3,9 +3,9 @@ from typing import Any
from langchain_core.documents import BaseDocumentTransformer
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import Output
-from langflow.schema import Data
+from langflow.schema.data import Data
from langflow.utils.util import build_loader_repr_from_data
diff --git a/src/backend/base/langflow/base/embeddings/model.py b/src/backend/base/langflow/base/embeddings/model.py
index 6299b6547..bf16c12ea 100644
--- a/src/backend/base/langflow/base/embeddings/model.py
+++ b/src/backend/base/langflow/base/embeddings/model.py
@@ -1,4 +1,4 @@
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.field_typing import Embeddings
from langflow.io import Output
diff --git a/src/backend/base/langflow/base/flow_processing/utils.py b/src/backend/base/langflow/base/flow_processing/utils.py
index f1e7fdc6c..320053168 100644
--- a/src/backend/base/langflow/base/flow_processing/utils.py
+++ b/src/backend/base/langflow/base/flow_processing/utils.py
@@ -1,7 +1,7 @@
from loguru import logger
from langflow.graph.schema import ResultData, RunOutputs
-from langflow.schema import Data
+from langflow.schema.data import Data
from langflow.schema.message import Message
diff --git a/src/backend/base/langflow/base/io/chat.py b/src/backend/base/langflow/base/io/chat.py
index c0448e18d..a0abe0617 100644
--- a/src/backend/base/langflow/base/io/chat.py
+++ b/src/backend/base/langflow/base/io/chat.py
@@ -1,4 +1,4 @@
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
class ChatComponent(Component):
diff --git a/src/backend/base/langflow/base/io/text.py b/src/backend/base/langflow/base/io/text.py
index d2854a8ce..b9cb03797 100644
--- a/src/backend/base/langflow/base/io/text.py
+++ b/src/backend/base/langflow/base/io/text.py
@@ -1,4 +1,4 @@
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
class TextComponent(Component):
diff --git a/src/backend/base/langflow/base/langchain_utilities/model.py b/src/backend/base/langflow/base/langchain_utilities/model.py
index 161950339..e94e8979b 100644
--- a/src/backend/base/langflow/base/langchain_utilities/model.py
+++ b/src/backend/base/langflow/base/langchain_utilities/model.py
@@ -1,10 +1,11 @@
from abc import abstractmethod
from collections.abc import Sequence
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.field_typing import Tool
from langflow.io import Output
-from langflow.schema import Data, DataFrame
+from langflow.schema.data import Data
+from langflow.schema.dataframe import DataFrame
class LCToolComponent(Component):
diff --git a/src/backend/base/langflow/base/mcp/util.py b/src/backend/base/langflow/base/mcp/util.py
index da69e9ae7..b6dcfc5fc 100644
--- a/src/backend/base/langflow/base/mcp/util.py
+++ b/src/backend/base/langflow/base/mcp/util.py
@@ -18,7 +18,7 @@ from mcp.client.sse import sse_client
from pydantic import BaseModel, Field, create_model
from sqlmodel import select
-from langflow.services.database.models import Flow
+from langflow.services.database.models.flow.model import Flow
HTTP_ERROR_STATUS_CODE = httpx_codes.BAD_REQUEST # HTTP status code for client errors
NULLABLE_TYPE_LENGTH = 2 # Number of types in a nullable union (the type itself + null)
diff --git a/src/backend/base/langflow/base/memory/memory.py b/src/backend/base/langflow/base/memory/memory.py
index c90b25028..a626a769d 100644
--- a/src/backend/base/langflow/base/memory/memory.py
+++ b/src/backend/base/langflow/base/memory/memory.py
@@ -1,5 +1,5 @@
-from langflow.custom import CustomComponent
-from langflow.schema import Data
+from langflow.custom.custom_component.custom_component import CustomComponent
+from langflow.schema.data import Data
from langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_USER
diff --git a/src/backend/base/langflow/base/memory/model.py b/src/backend/base/langflow/base/memory/model.py
index ae696f226..6d049fd32 100644
--- a/src/backend/base/langflow/base/memory/model.py
+++ b/src/backend/base/langflow/base/memory/model.py
@@ -2,10 +2,10 @@ from abc import abstractmethod
from langchain.memory import ConversationBufferMemory
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.field_typing import BaseChatMemory
from langflow.field_typing.constants import Memory
-from langflow.template import Output
+from langflow.template.field.base import Output
class LCChatMemoryComponent(Component):
diff --git a/src/backend/base/langflow/base/models/model.py b/src/backend/base/langflow/base/models/model.py
index dc978c0f5..9405137e7 100644
--- a/src/backend/base/langflow/base/models/model.py
+++ b/src/backend/base/langflow/base/models/model.py
@@ -9,10 +9,9 @@ from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, System
from langchain_core.output_parsers import BaseOutputParser
from langflow.base.constants import STREAM_INFO_TEXT
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.field_typing import LanguageModel
-from langflow.inputs import MessageInput
-from langflow.inputs.inputs import BoolInput, InputTypes, MultilineInput
+from langflow.inputs.inputs import BoolInput, InputTypes, MessageInput, MultilineInput
from langflow.schema.message import Message
from langflow.template.field.base import Output
diff --git a/src/backend/base/langflow/base/prompts/utils.py b/src/backend/base/langflow/base/prompts/utils.py
index 07bc692b6..cac903a15 100644
--- a/src/backend/base/langflow/base/prompts/utils.py
+++ b/src/backend/base/langflow/base/prompts/utils.py
@@ -2,7 +2,7 @@ from copy import deepcopy
from langchain_core.documents import Document
-from langflow.schema import Data
+from langflow.schema.data import Data
def data_to_string(record: Data) -> str:
diff --git a/src/backend/base/langflow/base/tools/run_flow.py b/src/backend/base/langflow/base/tools/run_flow.py
index 839903049..a7fe8f87a 100644
--- a/src/backend/base/langflow/base/tools/run_flow.py
+++ b/src/backend/base/langflow/base/tools/run_flow.py
@@ -4,8 +4,7 @@ from typing import TYPE_CHECKING
from loguru import logger
from typing_extensions import override
-from langflow.custom import Component
-from langflow.custom.custom_component.component import _get_component_toolkit
+from langflow.custom.custom_component.component import Component, _get_component_toolkit
from langflow.field_typing import Tool
from langflow.graph.graph.base import Graph
from langflow.graph.vertex.base import Vertex
@@ -15,10 +14,11 @@ from langflow.inputs.inputs import (
InputTypes,
MessageInput,
)
-from langflow.schema import Data, dotdict
+from langflow.schema.data import Data
from langflow.schema.dataframe import DataFrame
+from langflow.schema.dotdict import dotdict
from langflow.schema.message import Message
-from langflow.template import Output
+from langflow.template.field.base import Output
if TYPE_CHECKING:
from langflow.base.tools.component_tool import ComponentToolkit
diff --git a/src/backend/base/langflow/base/vectorstores/model.py b/src/backend/base/langflow/base/vectorstores/model.py
index 7433eb37f..b464a046d 100644
--- a/src/backend/base/langflow/base/vectorstores/model.py
+++ b/src/backend/base/langflow/base/vectorstores/model.py
@@ -2,12 +2,13 @@ from abc import abstractmethod
from functools import wraps
from typing import TYPE_CHECKING, Any
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.field_typing import Text, VectorStore
from langflow.helpers.data import docs_to_data
from langflow.inputs.inputs import BoolInput
from langflow.io import HandleInput, Output, QueryInput
-from langflow.schema import Data, DataFrame
+from langflow.schema.data import Data
+from langflow.schema.dataframe import DataFrame
if TYPE_CHECKING:
from langchain_core.documents import Document
diff --git a/src/backend/base/langflow/base/vectorstores/utils.py b/src/backend/base/langflow/base/vectorstores/utils.py
index d64891edf..4d266a5fc 100644
--- a/src/backend/base/langflow/base/vectorstores/utils.py
+++ b/src/backend/base/langflow/base/vectorstores/utils.py
@@ -1,4 +1,4 @@
-from langflow.schema import Data
+from langflow.schema.data import Data
def chroma_collection_to_data(collection_dict: dict):
diff --git a/src/backend/base/langflow/components/Notion/add_content_to_page.py b/src/backend/base/langflow/components/Notion/add_content_to_page.py
index 935f1e541..ac9b7a98c 100644
--- a/src/backend/base/langflow/components/Notion/add_content_to_page.py
+++ b/src/backend/base/langflow/components/Notion/add_content_to_page.py
@@ -10,8 +10,8 @@ from pydantic import BaseModel, Field
from langflow.base.langchain_utilities.model import LCToolComponent
from langflow.field_typing import Tool
-from langflow.inputs import MultilineInput, SecretStrInput, StrInput
-from langflow.schema import Data
+from langflow.inputs.inputs import MultilineInput, SecretStrInput, StrInput
+from langflow.schema.data import Data
MIN_ROWS_IN_TABLE = 3
diff --git a/src/backend/base/langflow/components/Notion/create_page.py b/src/backend/base/langflow/components/Notion/create_page.py
index d38a98988..a606f8f6e 100644
--- a/src/backend/base/langflow/components/Notion/create_page.py
+++ b/src/backend/base/langflow/components/Notion/create_page.py
@@ -7,8 +7,8 @@ from pydantic import BaseModel, Field
from langflow.base.langchain_utilities.model import LCToolComponent
from langflow.field_typing import Tool
-from langflow.inputs import MultilineInput, SecretStrInput, StrInput
-from langflow.schema import Data
+from langflow.inputs.inputs import MultilineInput, SecretStrInput, StrInput
+from langflow.schema.data import Data
class NotionPageCreator(LCToolComponent):
diff --git a/src/backend/base/langflow/components/Notion/list_database_properties.py b/src/backend/base/langflow/components/Notion/list_database_properties.py
index 07e9cbb4f..4c2961481 100644
--- a/src/backend/base/langflow/components/Notion/list_database_properties.py
+++ b/src/backend/base/langflow/components/Notion/list_database_properties.py
@@ -5,8 +5,8 @@ from pydantic import BaseModel, Field
from langflow.base.langchain_utilities.model import LCToolComponent
from langflow.field_typing import Tool
-from langflow.inputs import SecretStrInput, StrInput
-from langflow.schema import Data
+from langflow.inputs.inputs import SecretStrInput, StrInput
+from langflow.schema.data import Data
class NotionDatabaseProperties(LCToolComponent):
diff --git a/src/backend/base/langflow/components/Notion/list_pages.py b/src/backend/base/langflow/components/Notion/list_pages.py
index b46b8cae2..b7691b86b 100644
--- a/src/backend/base/langflow/components/Notion/list_pages.py
+++ b/src/backend/base/langflow/components/Notion/list_pages.py
@@ -8,8 +8,8 @@ from pydantic import BaseModel, Field
from langflow.base.langchain_utilities.model import LCToolComponent
from langflow.field_typing import Tool
-from langflow.inputs import MultilineInput, SecretStrInput, StrInput
-from langflow.schema import Data
+from langflow.inputs.inputs import MultilineInput, SecretStrInput, StrInput
+from langflow.schema.data import Data
class NotionListPages(LCToolComponent):
diff --git a/src/backend/base/langflow/components/Notion/list_users.py b/src/backend/base/langflow/components/Notion/list_users.py
index d99a71e1c..e5807f58f 100644
--- a/src/backend/base/langflow/components/Notion/list_users.py
+++ b/src/backend/base/langflow/components/Notion/list_users.py
@@ -4,8 +4,8 @@ from pydantic import BaseModel
from langflow.base.langchain_utilities.model import LCToolComponent
from langflow.field_typing import Tool
-from langflow.inputs import SecretStrInput
-from langflow.schema import Data
+from langflow.inputs.inputs import SecretStrInput
+from langflow.schema.data import Data
class NotionUserList(LCToolComponent):
diff --git a/src/backend/base/langflow/components/Notion/page_content_viewer.py b/src/backend/base/langflow/components/Notion/page_content_viewer.py
index f334ecded..c1287b773 100644
--- a/src/backend/base/langflow/components/Notion/page_content_viewer.py
+++ b/src/backend/base/langflow/components/Notion/page_content_viewer.py
@@ -5,8 +5,8 @@ from pydantic import BaseModel, Field
from langflow.base.langchain_utilities.model import LCToolComponent
from langflow.field_typing import Tool
-from langflow.inputs import SecretStrInput, StrInput
-from langflow.schema import Data
+from langflow.inputs.inputs import SecretStrInput, StrInput
+from langflow.schema.data import Data
class NotionPageContent(LCToolComponent):
diff --git a/src/backend/base/langflow/components/Notion/search.py b/src/backend/base/langflow/components/Notion/search.py
index 7d93a3fa7..3512b35d1 100644
--- a/src/backend/base/langflow/components/Notion/search.py
+++ b/src/backend/base/langflow/components/Notion/search.py
@@ -6,8 +6,8 @@ from pydantic import BaseModel, Field
from langflow.base.langchain_utilities.model import LCToolComponent
from langflow.field_typing import Tool
-from langflow.inputs import DropdownInput, SecretStrInput, StrInput
-from langflow.schema import Data
+from langflow.inputs.inputs import DropdownInput, SecretStrInput, StrInput
+from langflow.schema.data import Data
class NotionSearch(LCToolComponent):
diff --git a/src/backend/base/langflow/components/Notion/update_page_property.py b/src/backend/base/langflow/components/Notion/update_page_property.py
index fe777dca9..15a4a8228 100644
--- a/src/backend/base/langflow/components/Notion/update_page_property.py
+++ b/src/backend/base/langflow/components/Notion/update_page_property.py
@@ -8,8 +8,8 @@ from pydantic import BaseModel, Field
from langflow.base.langchain_utilities.model import LCToolComponent
from langflow.field_typing import Tool
-from langflow.inputs import MultilineInput, SecretStrInput, StrInput
-from langflow.schema import Data
+from langflow.inputs.inputs import MultilineInput, SecretStrInput, StrInput
+from langflow.schema.data import Data
class NotionPageUpdate(LCToolComponent):
diff --git a/src/backend/base/langflow/components/agentql/agentql_api.py b/src/backend/base/langflow/components/agentql/agentql_api.py
index 51377be44..578c5e95d 100644
--- a/src/backend/base/langflow/components/agentql/agentql_api.py
+++ b/src/backend/base/langflow/components/agentql/agentql_api.py
@@ -1,7 +1,7 @@
import httpx
from loguru import logger
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.field_typing.range_spec import RangeSpec
from langflow.io import (
BoolInput,
@@ -12,7 +12,7 @@ from langflow.io import (
Output,
SecretStrInput,
)
-from langflow.schema import Data
+from langflow.schema.data import Data
class AgentQL(Component):
diff --git a/src/backend/base/langflow/components/agents/agent.py b/src/backend/base/langflow/components/agents/agent.py
index 672e7b4b5..00b018615 100644
--- a/src/backend/base/langflow/components/agents/agent.py
+++ b/src/backend/base/langflow/components/agents/agent.py
@@ -9,7 +9,7 @@ from langflow.base.models.model_input_constants import (
MODELS_METADATA,
)
from langflow.base.models.model_utils import get_model_name
-from langflow.components.helpers import CurrentDateComponent
+from langflow.components.helpers.current_date import CurrentDateComponent
from langflow.components.helpers.memory import MemoryComponent
from langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent
from langflow.custom.custom_component.component import _get_component_toolkit
diff --git a/src/backend/base/langflow/components/amazon/amazon_bedrock_embedding.py b/src/backend/base/langflow/components/amazon/amazon_bedrock_embedding.py
index 266f6b11f..f7de6cf00 100644
--- a/src/backend/base/langflow/components/amazon/amazon_bedrock_embedding.py
+++ b/src/backend/base/langflow/components/amazon/amazon_bedrock_embedding.py
@@ -1,7 +1,7 @@
from langflow.base.models.aws_constants import AWS_EMBEDDING_MODEL_IDS, AWS_REGIONS
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import Embeddings
-from langflow.inputs import SecretStrInput
+from langflow.inputs.inputs import SecretStrInput
from langflow.io import DropdownInput, MessageTextInput, Output
diff --git a/src/backend/base/langflow/components/amazon/amazon_bedrock_model.py b/src/backend/base/langflow/components/amazon/amazon_bedrock_model.py
index fa294c94f..bc01236c8 100644
--- a/src/backend/base/langflow/components/amazon/amazon_bedrock_model.py
+++ b/src/backend/base/langflow/components/amazon/amazon_bedrock_model.py
@@ -1,7 +1,7 @@
from langflow.base.models.aws_constants import AWS_REGIONS, AWS_MODEL_IDs
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
-from langflow.inputs import MessageTextInput, SecretStrInput
+from langflow.inputs.inputs import MessageTextInput, SecretStrInput
from langflow.io import DictInput, DropdownInput
diff --git a/src/backend/base/langflow/components/amazon/s3_bucket_uploader.py b/src/backend/base/langflow/components/amazon/s3_bucket_uploader.py
index 7931d4512..c0d84dc8a 100644
--- a/src/backend/base/langflow/components/amazon/s3_bucket_uploader.py
+++ b/src/backend/base/langflow/components/amazon/s3_bucket_uploader.py
@@ -3,7 +3,7 @@ from typing import Any
import boto3
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import (
BoolInput,
DropdownInput,
diff --git a/src/backend/base/langflow/components/apify/apify_actor.py b/src/backend/base/langflow/components/apify/apify_actor.py
index 2c7880ff8..39a8412d8 100644
--- a/src/backend/base/langflow/components/apify/apify_actor.py
+++ b/src/backend/base/langflow/components/apify/apify_actor.py
@@ -7,11 +7,11 @@ from langchain_community.document_loaders.apify_dataset import ApifyDatasetLoade
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field, field_serializer
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.field_typing import Tool
from langflow.inputs.inputs import BoolInput
from langflow.io import MultilineInput, Output, SecretStrInput, StrInput
-from langflow.schema import Data
+from langflow.schema.data import Data
MAX_DESCRIPTION_LEN = 250
diff --git a/src/backend/base/langflow/components/assemblyai/assemblyai_get_subtitles.py b/src/backend/base/langflow/components/assemblyai/assemblyai_get_subtitles.py
index 12dfb5624..3d477497f 100644
--- a/src/backend/base/langflow/components/assemblyai/assemblyai_get_subtitles.py
+++ b/src/backend/base/langflow/components/assemblyai/assemblyai_get_subtitles.py
@@ -1,9 +1,9 @@
import assemblyai as aai
from loguru import logger
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import DataInput, DropdownInput, IntInput, Output, SecretStrInput
-from langflow.schema import Data
+from langflow.schema.data import Data
class AssemblyAIGetSubtitles(Component):
diff --git a/src/backend/base/langflow/components/assemblyai/assemblyai_lemur.py b/src/backend/base/langflow/components/assemblyai/assemblyai_lemur.py
index 059914cda..ec5bbed5a 100644
--- a/src/backend/base/langflow/components/assemblyai/assemblyai_lemur.py
+++ b/src/backend/base/langflow/components/assemblyai/assemblyai_lemur.py
@@ -1,9 +1,9 @@
import assemblyai as aai
from loguru import logger
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import DataInput, DropdownInput, FloatInput, IntInput, MultilineInput, Output, SecretStrInput
-from langflow.schema import Data
+from langflow.schema.data import Data
class AssemblyAILeMUR(Component):
diff --git a/src/backend/base/langflow/components/assemblyai/assemblyai_list_transcripts.py b/src/backend/base/langflow/components/assemblyai/assemblyai_list_transcripts.py
index 369b85de2..a9c101b0a 100644
--- a/src/backend/base/langflow/components/assemblyai/assemblyai_list_transcripts.py
+++ b/src/backend/base/langflow/components/assemblyai/assemblyai_list_transcripts.py
@@ -1,9 +1,9 @@
import assemblyai as aai
from loguru import logger
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import BoolInput, DropdownInput, IntInput, MessageTextInput, Output, SecretStrInput
-from langflow.schema import Data
+from langflow.schema.data import Data
class AssemblyAIListTranscripts(Component):
diff --git a/src/backend/base/langflow/components/assemblyai/assemblyai_poll_transcript.py b/src/backend/base/langflow/components/assemblyai/assemblyai_poll_transcript.py
index f2ab67839..e3795f849 100644
--- a/src/backend/base/langflow/components/assemblyai/assemblyai_poll_transcript.py
+++ b/src/backend/base/langflow/components/assemblyai/assemblyai_poll_transcript.py
@@ -1,10 +1,10 @@
import assemblyai as aai
from loguru import logger
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.field_typing.range_spec import RangeSpec
from langflow.io import DataInput, FloatInput, Output, SecretStrInput
-from langflow.schema import Data
+from langflow.schema.data import Data
class AssemblyAITranscriptionJobPoller(Component):
diff --git a/src/backend/base/langflow/components/assemblyai/assemblyai_start_transcript.py b/src/backend/base/langflow/components/assemblyai/assemblyai_start_transcript.py
index 48cf11ef5..36da3e3cc 100644
--- a/src/backend/base/langflow/components/assemblyai/assemblyai_start_transcript.py
+++ b/src/backend/base/langflow/components/assemblyai/assemblyai_start_transcript.py
@@ -3,9 +3,9 @@ from pathlib import Path
import assemblyai as aai
from loguru import logger
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import BoolInput, DropdownInput, FileInput, MessageTextInput, Output, SecretStrInput
-from langflow.schema import Data
+from langflow.schema.data import Data
class AssemblyAITranscriptionJobCreator(Component):
diff --git a/src/backend/base/langflow/components/composio/composio_api.py b/src/backend/base/langflow/components/composio/composio_api.py
index cef283588..a7102c320 100644
--- a/src/backend/base/langflow/components/composio/composio_api.py
+++ b/src/backend/base/langflow/components/composio/composio_api.py
@@ -10,7 +10,7 @@ from langchain_core.tools import Tool
# Local imports
from langflow.base.langchain_utilities.model import LCToolComponent
-from langflow.inputs import (
+from langflow.inputs.inputs import (
ConnectionInput,
MessageTextInput,
SecretStrInput,
diff --git a/src/backend/base/langflow/components/composio/gmail_composio.py b/src/backend/base/langflow/components/composio/gmail_composio.py
index e7f2f2d85..f1e5213f7 100644
--- a/src/backend/base/langflow/components/composio/gmail_composio.py
+++ b/src/backend/base/langflow/components/composio/gmail_composio.py
@@ -4,7 +4,7 @@ from typing import Any
from composio import Action
from langflow.base.composio.composio_base import ComposioBaseComponent
-from langflow.inputs import (
+from langflow.inputs.inputs import (
BoolInput,
FileInput,
IntInput,
diff --git a/src/backend/base/langflow/components/confluence/confluence.py b/src/backend/base/langflow/components/confluence/confluence.py
index 418bfe0ad..06f735c87 100644
--- a/src/backend/base/langflow/components/confluence/confluence.py
+++ b/src/backend/base/langflow/components/confluence/confluence.py
@@ -1,9 +1,9 @@
from langchain_community.document_loaders import ConfluenceLoader
from langchain_community.document_loaders.confluence import ContentFormat
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import BoolInput, DropdownInput, IntInput, Output, SecretStrInput, StrInput
-from langflow.schema import Data
+from langflow.schema.data import Data
class ConfluenceComponent(Component):
diff --git a/src/backend/base/langflow/components/crewai/crewai.py b/src/backend/base/langflow/components/crewai/crewai.py
index e942ab236..faacd2550 100644
--- a/src/backend/base/langflow/components/crewai/crewai.py
+++ b/src/backend/base/langflow/components/crewai/crewai.py
@@ -1,7 +1,7 @@
from crewai import Agent
from langflow.base.agents.crewai.crew import convert_llm, convert_tools
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import BoolInput, DictInput, HandleInput, MultilineInput, Output
diff --git a/src/backend/base/langflow/components/crewai/hierarchical_task.py b/src/backend/base/langflow/components/crewai/hierarchical_task.py
index e5a73e851..13ef35f94 100644
--- a/src/backend/base/langflow/components/crewai/hierarchical_task.py
+++ b/src/backend/base/langflow/components/crewai/hierarchical_task.py
@@ -1,5 +1,5 @@
from langflow.base.agents.crewai.tasks import HierarchicalTask
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import HandleInput, MultilineInput, Output
diff --git a/src/backend/base/langflow/components/crewai/sequential_task.py b/src/backend/base/langflow/components/crewai/sequential_task.py
index 81add12c6..95edb3581 100644
--- a/src/backend/base/langflow/components/crewai/sequential_task.py
+++ b/src/backend/base/langflow/components/crewai/sequential_task.py
@@ -1,5 +1,5 @@
from langflow.base.agents.crewai.tasks import SequentialTask
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import BoolInput, HandleInput, MultilineInput, Output
diff --git a/src/backend/base/langflow/components/crewai/sequential_task_agent.py b/src/backend/base/langflow/components/crewai/sequential_task_agent.py
index bb3e5c59c..5764ddcde 100644
--- a/src/backend/base/langflow/components/crewai/sequential_task_agent.py
+++ b/src/backend/base/langflow/components/crewai/sequential_task_agent.py
@@ -1,7 +1,7 @@
from crewai import Agent, Task
from langflow.base.agents.crewai.tasks import SequentialTask
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import BoolInput, DictInput, HandleInput, MultilineInput, Output
diff --git a/src/backend/base/langflow/components/custom_component/custom_component.py b/src/backend/base/langflow/components/custom_component/custom_component.py
index 305200d4d..6870ad7cd 100644
--- a/src/backend/base/langflow/components/custom_component/custom_component.py
+++ b/src/backend/base/langflow/components/custom_component/custom_component.py
@@ -1,7 +1,7 @@
# from langflow.field_typing import Data
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import MessageTextInput, Output
-from langflow.schema import Data
+from langflow.schema.data import Data
class CustomComponent(Component):
diff --git a/src/backend/base/langflow/components/data/api_request.py b/src/backend/base/langflow/components/data/api_request.py
index 5f4001df5..a3f305088 100644
--- a/src/backend/base/langflow/components/data/api_request.py
+++ b/src/backend/base/langflow/components/data/api_request.py
@@ -12,7 +12,7 @@ import httpx
import validators
from langflow.base.curl.parse import parse_context
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.inputs.inputs import TabInput
from langflow.io import (
BoolInput,
@@ -24,7 +24,7 @@ from langflow.io import (
Output,
TableInput,
)
-from langflow.schema import Data
+from langflow.schema.data import Data
from langflow.schema.dotdict import dotdict
from langflow.services.deps import get_settings_service
from langflow.utils.component_utils import set_current_fields, set_field_advanced, set_field_display
diff --git a/src/backend/base/langflow/components/data/csv_to_data.py b/src/backend/base/langflow/components/data/csv_to_data.py
index 866062a1e..4b95563fc 100644
--- a/src/backend/base/langflow/components/data/csv_to_data.py
+++ b/src/backend/base/langflow/components/data/csv_to_data.py
@@ -2,9 +2,9 @@ import csv
import io
from pathlib import Path
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import FileInput, MessageTextInput, MultilineInput, Output
-from langflow.schema import Data
+from langflow.schema.data import Data
class CSVToDataComponent(Component):
diff --git a/src/backend/base/langflow/components/data/directory.py b/src/backend/base/langflow/components/data/directory.py
index f195d7c3f..896122f96 100644
--- a/src/backend/base/langflow/components/data/directory.py
+++ b/src/backend/base/langflow/components/data/directory.py
@@ -1,9 +1,9 @@
from langflow.base.data.utils import TEXT_FILE_TYPES, parallel_load_data, parse_text_file_to_data, retrieve_file_paths
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import BoolInput, IntInput, MessageTextInput, MultiselectInput
-from langflow.schema import Data
+from langflow.schema.data import Data
from langflow.schema.dataframe import DataFrame
-from langflow.template import Output
+from langflow.template.field.base import Output
class DirectoryComponent(Component):
diff --git a/src/backend/base/langflow/components/data/file.py b/src/backend/base/langflow/components/data/file.py
index 6c30cf278..95ba804dc 100644
--- a/src/backend/base/langflow/components/data/file.py
+++ b/src/backend/base/langflow/components/data/file.py
@@ -1,7 +1,7 @@
-from langflow.base.data import BaseFileComponent
+from langflow.base.data.base_file import BaseFileComponent
from langflow.base.data.utils import TEXT_FILE_TYPES, parallel_load_data, parse_text_file_to_data
from langflow.io import BoolInput, IntInput
-from langflow.schema import Data
+from langflow.schema.data import Data
class FileComponent(BaseFileComponent):
diff --git a/src/backend/base/langflow/components/data/json_to_data.py b/src/backend/base/langflow/components/data/json_to_data.py
index 9374cddaa..a41fb3a21 100644
--- a/src/backend/base/langflow/components/data/json_to_data.py
+++ b/src/backend/base/langflow/components/data/json_to_data.py
@@ -3,9 +3,9 @@ from pathlib import Path
from json_repair import repair_json
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import FileInput, MessageTextInput, MultilineInput, Output
-from langflow.schema import Data
+from langflow.schema.data import Data
class JSONToDataComponent(Component):
diff --git a/src/backend/base/langflow/components/data/mcp_component.py b/src/backend/base/langflow/components/data/mcp_component.py
index fbe920b08..fb8e60856 100644
--- a/src/backend/base/langflow/components/data/mcp_component.py
+++ b/src/backend/base/langflow/components/data/mcp_component.py
@@ -11,13 +11,12 @@ from langflow.base.mcp.util import (
create_tool_coroutine,
create_tool_func,
)
-from langflow.custom import Component
-from langflow.inputs import DropdownInput, TableInput
-from langflow.inputs.inputs import InputTypes
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import DropdownInput, InputTypes, TableInput
from langflow.io import MessageTextInput, MultilineInput, Output, TabInput
from langflow.io.schema import flatten_schema, schema_to_langflow_inputs
from langflow.logging import logger
-from langflow.schema import DataFrame
+from langflow.schema.dataframe import DataFrame
def maybe_unflatten_dict(flat: dict[str, Any]) -> dict[str, Any]:
diff --git a/src/backend/base/langflow/components/data/url.py b/src/backend/base/langflow/components/data/url.py
index 8d1b1f167..013f63837 100644
--- a/src/backend/base/langflow/components/data/url.py
+++ b/src/backend/base/langflow/components/data/url.py
@@ -5,11 +5,12 @@ from bs4 import BeautifulSoup
from langchain_community.document_loaders import RecursiveUrlLoader
from loguru import logger
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.field_typing.range_spec import RangeSpec
from langflow.helpers.data import safe_convert
from langflow.io import BoolInput, DropdownInput, IntInput, MessageTextInput, Output, SliderInput, TableInput
-from langflow.schema import DataFrame, Message
+from langflow.schema.dataframe import DataFrame
+from langflow.schema.message import Message
from langflow.services.deps import get_settings_service
# Constants
diff --git a/src/backend/base/langflow/components/data/webhook.py b/src/backend/base/langflow/components/data/webhook.py
index 749626130..46c693a77 100644
--- a/src/backend/base/langflow/components/data/webhook.py
+++ b/src/backend/base/langflow/components/data/webhook.py
@@ -1,8 +1,8 @@
import json
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import MultilineInput, Output
-from langflow.schema import Data
+from langflow.schema.data import Data
class WebhookComponent(Component):
diff --git a/src/backend/base/langflow/components/datastax/astra_assistant_manager.py b/src/backend/base/langflow/components/datastax/astra_assistant_manager.py
index 74be041f9..6e4ea037e 100644
--- a/src/backend/base/langflow/components/datastax/astra_assistant_manager.py
+++ b/src/backend/base/langflow/components/datastax/astra_assistant_manager.py
@@ -14,11 +14,11 @@ from langflow.base.astra_assistants.util import (
wrap_base_tool_as_tool_interface,
)
from langflow.custom.custom_component.component_with_cache import ComponentWithCache
-from langflow.inputs import DropdownInput, FileInput, HandleInput, MultilineInput
+from langflow.inputs.inputs import DropdownInput, FileInput, HandleInput, MultilineInput
from langflow.memory import delete_message
from langflow.schema.content_block import ContentBlock
from langflow.schema.message import Message
-from langflow.template import Output
+from langflow.template.field.base import Output
from langflow.utils.constants import MESSAGE_SENDER_AI
if TYPE_CHECKING:
diff --git a/src/backend/base/langflow/components/datastax/astradb_cql.py b/src/backend/base/langflow/components/datastax/astradb_cql.py
index 1408a7b9c..8f00db720 100644
--- a/src/backend/base/langflow/components/datastax/astradb_cql.py
+++ b/src/backend/base/langflow/components/datastax/astradb_cql.py
@@ -11,7 +11,7 @@ from langchain_core.tools import StructuredTool, Tool
from langflow.base.langchain_utilities.model import LCToolComponent
from langflow.io import DictInput, IntInput, SecretStrInput, StrInput, TableInput
from langflow.logging import logger
-from langflow.schema import Data
+from langflow.schema.data import Data
from langflow.schema.table import EditMode
diff --git a/src/backend/base/langflow/components/datastax/astradb_tool.py b/src/backend/base/langflow/components/datastax/astradb_tool.py
index 86816b330..e7a7df31d 100644
--- a/src/backend/base/langflow/components/datastax/astradb_tool.py
+++ b/src/backend/base/langflow/components/datastax/astradb_tool.py
@@ -10,7 +10,7 @@ from langchain_core.tools import StructuredTool, Tool
from langflow.base.langchain_utilities.model import LCToolComponent
from langflow.io import BoolInput, DictInput, HandleInput, IntInput, SecretStrInput, StrInput, TableInput
from langflow.logging import logger
-from langflow.schema import Data
+from langflow.schema.data import Data
from langflow.schema.table import EditMode
diff --git a/src/backend/base/langflow/components/datastax/create_assistant.py b/src/backend/base/langflow/components/datastax/create_assistant.py
index 1798892f5..daa9fa12b 100644
--- a/src/backend/base/langflow/components/datastax/create_assistant.py
+++ b/src/backend/base/langflow/components/datastax/create_assistant.py
@@ -2,9 +2,9 @@ from loguru import logger
from langflow.base.astra_assistants.util import get_patched_openai_client
from langflow.custom.custom_component.component_with_cache import ComponentWithCache
-from langflow.inputs import MultilineInput, StrInput
+from langflow.inputs.inputs import MultilineInput, StrInput
from langflow.schema.message import Message
-from langflow.template import Output
+from langflow.template.field.base import Output
class AssistantsCreateAssistant(ComponentWithCache):
diff --git a/src/backend/base/langflow/components/datastax/create_thread.py b/src/backend/base/langflow/components/datastax/create_thread.py
index c41bee0fe..0d4341db4 100644
--- a/src/backend/base/langflow/components/datastax/create_thread.py
+++ b/src/backend/base/langflow/components/datastax/create_thread.py
@@ -1,8 +1,8 @@
from langflow.base.astra_assistants.util import get_patched_openai_client
from langflow.custom.custom_component.component_with_cache import ComponentWithCache
-from langflow.inputs import MultilineInput
+from langflow.inputs.inputs import MultilineInput
from langflow.schema.message import Message
-from langflow.template import Output
+from langflow.template.field.base import Output
class AssistantsCreateThread(ComponentWithCache):
diff --git a/src/backend/base/langflow/components/datastax/dotenv.py b/src/backend/base/langflow/components/datastax/dotenv.py
index 83ba991ec..706e391f2 100644
--- a/src/backend/base/langflow/components/datastax/dotenv.py
+++ b/src/backend/base/langflow/components/datastax/dotenv.py
@@ -2,10 +2,10 @@ import io
from dotenv import load_dotenv
-from langflow.custom import Component
-from langflow.inputs import MultilineSecretInput
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import MultilineSecretInput
from langflow.schema.message import Message
-from langflow.template import Output
+from langflow.template.field.base import Output
class Dotenv(Component):
diff --git a/src/backend/base/langflow/components/datastax/get_assistant.py b/src/backend/base/langflow/components/datastax/get_assistant.py
index 8d6f05ed5..149309370 100644
--- a/src/backend/base/langflow/components/datastax/get_assistant.py
+++ b/src/backend/base/langflow/components/datastax/get_assistant.py
@@ -1,8 +1,8 @@
from langflow.base.astra_assistants.util import get_patched_openai_client
from langflow.custom.custom_component.component_with_cache import ComponentWithCache
-from langflow.inputs import MultilineInput, StrInput
+from langflow.inputs.inputs import MultilineInput, StrInput
from langflow.schema.message import Message
-from langflow.template import Output
+from langflow.template.field.base import Output
class AssistantsGetAssistantName(ComponentWithCache):
diff --git a/src/backend/base/langflow/components/datastax/getenvvar.py b/src/backend/base/langflow/components/datastax/getenvvar.py
index 0076cd651..9becc8173 100644
--- a/src/backend/base/langflow/components/datastax/getenvvar.py
+++ b/src/backend/base/langflow/components/datastax/getenvvar.py
@@ -1,9 +1,9 @@
import os
-from langflow.custom import Component
-from langflow.inputs import StrInput
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import StrInput
from langflow.schema.message import Message
-from langflow.template import Output
+from langflow.template.field.base import Output
class GetEnvVar(Component):
diff --git a/src/backend/base/langflow/components/datastax/run.py b/src/backend/base/langflow/components/datastax/run.py
index e065563cc..bf07fe7ff 100644
--- a/src/backend/base/langflow/components/datastax/run.py
+++ b/src/backend/base/langflow/components/datastax/run.py
@@ -4,10 +4,10 @@ from openai.lib.streaming import AssistantEventHandler
from langflow.base.astra_assistants.util import get_patched_openai_client
from langflow.custom.custom_component.component_with_cache import ComponentWithCache
-from langflow.inputs import MultilineInput
-from langflow.schema import dotdict
+from langflow.inputs.inputs import MultilineInput
+from langflow.schema.dotdict import dotdict
from langflow.schema.message import Message
-from langflow.template import Output
+from langflow.template.field.base import Output
class AssistantsRun(ComponentWithCache):
diff --git a/src/backend/base/langflow/components/deactivated/code_block_extractor.py b/src/backend/base/langflow/components/deactivated/code_block_extractor.py
index f1383fc8a..d89ffac76 100644
--- a/src/backend/base/langflow/components/deactivated/code_block_extractor.py
+++ b/src/backend/base/langflow/components/deactivated/code_block_extractor.py
@@ -1,6 +1,6 @@
import re
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.field_typing import Input, Output, Text
diff --git a/src/backend/base/langflow/components/deactivated/documents_to_data.py b/src/backend/base/langflow/components/deactivated/documents_to_data.py
index 5eaf6b60e..a15f02ffe 100644
--- a/src/backend/base/langflow/components/deactivated/documents_to_data.py
+++ b/src/backend/base/langflow/components/deactivated/documents_to_data.py
@@ -1,7 +1,7 @@
from langchain_core.documents import Document
-from langflow.custom import CustomComponent
-from langflow.schema import Data
+from langflow.custom.custom_component.custom_component import CustomComponent
+from langflow.schema.data import Data
class DocumentsToDataComponent(CustomComponent):
diff --git a/src/backend/base/langflow/components/deactivated/embed.py b/src/backend/base/langflow/components/deactivated/embed.py
index dee38e234..7ca021fad 100644
--- a/src/backend/base/langflow/components/deactivated/embed.py
+++ b/src/backend/base/langflow/components/deactivated/embed.py
@@ -1,6 +1,6 @@
-from langflow.custom import CustomComponent
+from langflow.custom.custom_component.custom_component import CustomComponent
from langflow.field_typing import Embeddings
-from langflow.schema import Data
+from langflow.schema.data import Data
class EmbedComponent(CustomComponent):
diff --git a/src/backend/base/langflow/components/deactivated/extract_key_from_data.py b/src/backend/base/langflow/components/deactivated/extract_key_from_data.py
index 3882d0d98..188b5c75f 100644
--- a/src/backend/base/langflow/components/deactivated/extract_key_from_data.py
+++ b/src/backend/base/langflow/components/deactivated/extract_key_from_data.py
@@ -1,5 +1,5 @@
-from langflow.custom import CustomComponent
-from langflow.schema import Data
+from langflow.custom.custom_component.custom_component import CustomComponent
+from langflow.schema.data import Data
class ExtractKeyFromDataComponent(CustomComponent):
diff --git a/src/backend/base/langflow/components/deactivated/list_flows.py b/src/backend/base/langflow/components/deactivated/list_flows.py
index 70d77d8e4..a4ccd024c 100644
--- a/src/backend/base/langflow/components/deactivated/list_flows.py
+++ b/src/backend/base/langflow/components/deactivated/list_flows.py
@@ -1,5 +1,5 @@
-from langflow.custom import CustomComponent
-from langflow.schema import Data
+from langflow.custom.custom_component.custom_component import CustomComponent
+from langflow.schema.data import Data
class ListFlowsComponent(CustomComponent):
diff --git a/src/backend/base/langflow/components/deactivated/mcp_sse.py b/src/backend/base/langflow/components/deactivated/mcp_sse.py
index e3588b2f1..b68910206 100644
--- a/src/backend/base/langflow/components/deactivated/mcp_sse.py
+++ b/src/backend/base/langflow/components/deactivated/mcp_sse.py
@@ -9,7 +9,7 @@ from langflow.base.mcp.util import (
create_tool_coroutine,
create_tool_func,
)
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.field_typing import Tool
from langflow.io import MessageTextInput, Output
diff --git a/src/backend/base/langflow/components/deactivated/mcp_stdio.py b/src/backend/base/langflow/components/deactivated/mcp_stdio.py
index e68d47f0d..059c4dec6 100644
--- a/src/backend/base/langflow/components/deactivated/mcp_stdio.py
+++ b/src/backend/base/langflow/components/deactivated/mcp_stdio.py
@@ -9,7 +9,7 @@ from langflow.base.mcp.util import (
create_tool_coroutine,
create_tool_func,
)
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.field_typing import Tool
from langflow.io import MessageTextInput, Output
diff --git a/src/backend/base/langflow/components/deactivated/merge_data.py b/src/backend/base/langflow/components/deactivated/merge_data.py
index 8c57a7888..f82124b19 100644
--- a/src/backend/base/langflow/components/deactivated/merge_data.py
+++ b/src/backend/base/langflow/components/deactivated/merge_data.py
@@ -1,8 +1,8 @@
from loguru import logger
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import DataInput, Output
-from langflow.schema import Data
+from langflow.schema.data import Data
class MergeDataComponent(Component):
diff --git a/src/backend/base/langflow/components/deactivated/message.py b/src/backend/base/langflow/components/deactivated/message.py
index 1ab0bda97..0a479d8ed 100644
--- a/src/backend/base/langflow/components/deactivated/message.py
+++ b/src/backend/base/langflow/components/deactivated/message.py
@@ -1,4 +1,4 @@
-from langflow.custom import CustomComponent
+from langflow.custom.custom_component.custom_component import CustomComponent
from langflow.schema.message import Message
from langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_USER
diff --git a/src/backend/base/langflow/components/deactivated/selective_passthrough.py b/src/backend/base/langflow/components/deactivated/selective_passthrough.py
index f82842993..6402af7ec 100644
--- a/src/backend/base/langflow/components/deactivated/selective_passthrough.py
+++ b/src/backend/base/langflow/components/deactivated/selective_passthrough.py
@@ -1,4 +1,4 @@
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.field_typing import Text
from langflow.io import BoolInput, DropdownInput, MessageTextInput, Output
diff --git a/src/backend/base/langflow/components/deactivated/should_run_next.py b/src/backend/base/langflow/components/deactivated/should_run_next.py
index a65687e8f..2541c923c 100644
--- a/src/backend/base/langflow/components/deactivated/should_run_next.py
+++ b/src/backend/base/langflow/components/deactivated/should_run_next.py
@@ -1,7 +1,7 @@
from langchain_core.messages import BaseMessage
from langchain_core.prompts import PromptTemplate
-from langflow.custom import CustomComponent
+from langflow.custom.custom_component.custom_component import CustomComponent
from langflow.field_typing import LanguageModel, Text
diff --git a/src/backend/base/langflow/components/deactivated/split_text.py b/src/backend/base/langflow/components/deactivated/split_text.py
index 36157538e..acb215adc 100644
--- a/src/backend/base/langflow/components/deactivated/split_text.py
+++ b/src/backend/base/langflow/components/deactivated/split_text.py
@@ -1,8 +1,8 @@
from langchain_text_splitters import CharacterTextSplitter
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import HandleInput, IntInput, MessageTextInput, Output
-from langflow.schema import Data
+from langflow.schema.data import Data
from langflow.utils.util import unescape_string
diff --git a/src/backend/base/langflow/components/deactivated/store_message.py b/src/backend/base/langflow/components/deactivated/store_message.py
index 0d68ece68..744c55cee 100644
--- a/src/backend/base/langflow/components/deactivated/store_message.py
+++ b/src/backend/base/langflow/components/deactivated/store_message.py
@@ -1,4 +1,4 @@
-from langflow.custom import CustomComponent
+from langflow.custom.custom_component.custom_component import CustomComponent
from langflow.memory import aget_messages, astore_message
from langflow.schema.message import Message
diff --git a/src/backend/base/langflow/components/deactivated/sub_flow.py b/src/backend/base/langflow/components/deactivated/sub_flow.py
index a4291a78d..faa6be35f 100644
--- a/src/backend/base/langflow/components/deactivated/sub_flow.py
+++ b/src/backend/base/langflow/components/deactivated/sub_flow.py
@@ -3,11 +3,11 @@ from typing import TYPE_CHECKING, Any
from loguru import logger
from langflow.base.flow_processing.utils import build_data_from_result_data
-from langflow.custom import CustomComponent
+from langflow.custom.custom_component.custom_component import CustomComponent
from langflow.graph.graph.base import Graph
from langflow.graph.vertex.base import Vertex
from langflow.helpers.flow import get_flow_inputs
-from langflow.schema import Data
+from langflow.schema.data import Data
from langflow.schema.dotdict import dotdict
from langflow.template.field.base import Input
diff --git a/src/backend/base/langflow/components/embeddings/astra_vectorize.py b/src/backend/base/langflow/components/embeddings/astra_vectorize.py
index f6721f95e..b1334a378 100644
--- a/src/backend/base/langflow/components/embeddings/astra_vectorize.py
+++ b/src/backend/base/langflow/components/embeddings/astra_vectorize.py
@@ -1,6 +1,6 @@
from typing import Any
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.inputs.inputs import DictInput, DropdownInput, MessageTextInput, SecretStrInput
from langflow.template.field.base import Output
diff --git a/src/backend/base/langflow/components/embeddings/google_generative_ai.py b/src/backend/base/langflow/components/embeddings/google_generative_ai.py
index 2284d4055..1d8c602fa 100644
--- a/src/backend/base/langflow/components/embeddings/google_generative_ai.py
+++ b/src/backend/base/langflow/components/embeddings/google_generative_ai.py
@@ -6,7 +6,7 @@ from langchain_core.embeddings import Embeddings
from langchain_google_genai import GoogleGenerativeAIEmbeddings
from langchain_google_genai._common import GoogleGenerativeAIError
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import MessageTextInput, Output, SecretStrInput
MIN_DIMENSION_ERROR = "Output dimensionality must be at least 1"
diff --git a/src/backend/base/langflow/components/embeddings/similarity.py b/src/backend/base/langflow/components/embeddings/similarity.py
index 1c78925fa..fc0e5dcf6 100644
--- a/src/backend/base/langflow/components/embeddings/similarity.py
+++ b/src/backend/base/langflow/components/embeddings/similarity.py
@@ -2,9 +2,9 @@ from typing import Any
import numpy as np
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import DataInput, DropdownInput, Output
-from langflow.schema import Data
+from langflow.schema.data import Data
class EmbeddingSimilarityComponent(Component):
diff --git a/src/backend/base/langflow/components/embeddings/text_embedder.py b/src/backend/base/langflow/components/embeddings/text_embedder.py
index a9efb1ffe..f7224ebcb 100644
--- a/src/backend/base/langflow/components/embeddings/text_embedder.py
+++ b/src/backend/base/langflow/components/embeddings/text_embedder.py
@@ -1,9 +1,9 @@
import logging
from typing import TYPE_CHECKING
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import HandleInput, MessageInput, Output
-from langflow.schema import Data
+from langflow.schema.data import Data
if TYPE_CHECKING:
from langflow.field_typing import Embeddings
diff --git a/src/backend/base/langflow/components/firecrawl/firecrawl_crawl_api.py b/src/backend/base/langflow/components/firecrawl/firecrawl_crawl_api.py
index 28e22d746..e58e1e112 100644
--- a/src/backend/base/langflow/components/firecrawl/firecrawl_crawl_api.py
+++ b/src/backend/base/langflow/components/firecrawl/firecrawl_crawl_api.py
@@ -1,8 +1,8 @@
import uuid
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import DataInput, IntInput, MultilineInput, Output, SecretStrInput, StrInput
-from langflow.schema import Data
+from langflow.schema.data import Data
class FirecrawlCrawlApi(Component):
diff --git a/src/backend/base/langflow/components/firecrawl/firecrawl_extract_api.py b/src/backend/base/langflow/components/firecrawl/firecrawl_extract_api.py
index e1aa3bb2e..fda1f745a 100644
--- a/src/backend/base/langflow/components/firecrawl/firecrawl_extract_api.py
+++ b/src/backend/base/langflow/components/firecrawl/firecrawl_extract_api.py
@@ -1,6 +1,6 @@
from loguru import logger
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import (
BoolInput,
DataInput,
@@ -8,7 +8,7 @@ from langflow.io import (
Output,
SecretStrInput,
)
-from langflow.schema import Data
+from langflow.schema.data import Data
class FirecrawlExtractApi(Component):
diff --git a/src/backend/base/langflow/components/firecrawl/firecrawl_map_api.py b/src/backend/base/langflow/components/firecrawl/firecrawl_map_api.py
index 73dd55a02..d28b74e14 100644
--- a/src/backend/base/langflow/components/firecrawl/firecrawl_map_api.py
+++ b/src/backend/base/langflow/components/firecrawl/firecrawl_map_api.py
@@ -1,11 +1,11 @@
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import (
BoolInput,
MultilineInput,
Output,
SecretStrInput,
)
-from langflow.schema import Data
+from langflow.schema.data import Data
class FirecrawlMapApi(Component):
diff --git a/src/backend/base/langflow/components/firecrawl/firecrawl_scrape_api.py b/src/backend/base/langflow/components/firecrawl/firecrawl_scrape_api.py
index d9989449f..e182e9292 100644
--- a/src/backend/base/langflow/components/firecrawl/firecrawl_scrape_api.py
+++ b/src/backend/base/langflow/components/firecrawl/firecrawl_scrape_api.py
@@ -1,4 +1,4 @@
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import (
DataInput,
IntInput,
@@ -6,7 +6,7 @@ from langflow.io import (
Output,
SecretStrInput,
)
-from langflow.schema import Data
+from langflow.schema.data import Data
class FirecrawlScrapeApi(Component):
diff --git a/src/backend/base/langflow/components/git/git.py b/src/backend/base/langflow/components/git/git.py
index 368062ab1..71cf311ed 100644
--- a/src/backend/base/langflow/components/git/git.py
+++ b/src/backend/base/langflow/components/git/git.py
@@ -7,9 +7,9 @@ from pathlib import Path
import anyio
from langchain_community.document_loaders.git import GitLoader
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import DropdownInput, MessageTextInput, Output
-from langflow.schema import Data
+from langflow.schema.data import Data
class GitLoaderComponent(Component):
diff --git a/src/backend/base/langflow/components/git/gitextractor.py b/src/backend/base/langflow/components/git/gitextractor.py
index 30584c776..48b08c1d3 100644
--- a/src/backend/base/langflow/components/git/gitextractor.py
+++ b/src/backend/base/langflow/components/git/gitextractor.py
@@ -7,9 +7,9 @@ from pathlib import Path
import aiofiles
import git
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import MessageTextInput, Output
-from langflow.schema import Data
+from langflow.schema.data import Data
from langflow.schema.message import Message
diff --git a/src/backend/base/langflow/components/google/gmail.py b/src/backend/base/langflow/components/google/gmail.py
index 05a01794c..867257776 100644
--- a/src/backend/base/langflow/components/google/gmail.py
+++ b/src/backend/base/langflow/components/google/gmail.py
@@ -13,11 +13,11 @@ from langchain_core.messages import HumanMessage
from langchain_google_community.gmail.loader import GMailLoader
from loguru import logger
-from langflow.custom import Component
-from langflow.inputs import MessageTextInput
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import MessageTextInput
from langflow.io import SecretStrInput
-from langflow.schema import Data
-from langflow.template import Output
+from langflow.schema.data import Data
+from langflow.template.field.base import Output
class GmailLoaderComponent(Component):
diff --git a/src/backend/base/langflow/components/google/google_drive.py b/src/backend/base/langflow/components/google/google_drive.py
index b87a61aed..4a333c5ab 100644
--- a/src/backend/base/langflow/components/google/google_drive.py
+++ b/src/backend/base/langflow/components/google/google_drive.py
@@ -5,12 +5,12 @@ from google.auth.exceptions import RefreshError
from google.oauth2.credentials import Credentials
from langchain_google_community import GoogleDriveLoader
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.helpers.data import docs_to_data
-from langflow.inputs import MessageTextInput
+from langflow.inputs.inputs import MessageTextInput
from langflow.io import SecretStrInput
-from langflow.schema import Data
-from langflow.template import Output
+from langflow.schema.data import Data
+from langflow.template.field.base import Output
class GoogleDriveComponent(Component):
diff --git a/src/backend/base/langflow/components/google/google_drive_search.py b/src/backend/base/langflow/components/google/google_drive_search.py
index ab0750856..71e7f5c25 100644
--- a/src/backend/base/langflow/components/google/google_drive_search.py
+++ b/src/backend/base/langflow/components/google/google_drive_search.py
@@ -3,11 +3,11 @@ import json
from google.oauth2.credentials import Credentials
from googleapiclient.discovery import build
-from langflow.custom import Component
-from langflow.inputs import DropdownInput, MessageTextInput
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import DropdownInput, MessageTextInput
from langflow.io import SecretStrInput
-from langflow.schema import Data
-from langflow.template import Output
+from langflow.schema.data import Data
+from langflow.template.field.base import Output
class GoogleDriveSearchComponent(Component):
diff --git a/src/backend/base/langflow/components/google/google_oauth_token.py b/src/backend/base/langflow/components/google/google_oauth_token.py
index d9aaaf2a9..968b65597 100644
--- a/src/backend/base/langflow/components/google/google_oauth_token.py
+++ b/src/backend/base/langflow/components/google/google_oauth_token.py
@@ -6,9 +6,9 @@ from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import FileInput, MultilineInput, Output
-from langflow.schema import Data
+from langflow.schema.data import Data
class GoogleOAuthToken(Component):
diff --git a/src/backend/base/langflow/components/helpers/calculator_core.py b/src/backend/base/langflow/components/helpers/calculator_core.py
index 0eef657e9..66190f89c 100644
--- a/src/backend/base/langflow/components/helpers/calculator_core.py
+++ b/src/backend/base/langflow/components/helpers/calculator_core.py
@@ -2,10 +2,10 @@ import ast
import operator
from collections.abc import Callable
-from langflow.custom import Component
-from langflow.inputs import MessageTextInput
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import MessageTextInput
from langflow.io import Output
-from langflow.schema import Data
+from langflow.schema.data import Data
class CalculatorComponent(Component):
diff --git a/src/backend/base/langflow/components/helpers/create_list.py b/src/backend/base/langflow/components/helpers/create_list.py
index 016c4da94..11fe7f16c 100644
--- a/src/backend/base/langflow/components/helpers/create_list.py
+++ b/src/backend/base/langflow/components/helpers/create_list.py
@@ -1,8 +1,8 @@
-from langflow.custom import Component
-from langflow.inputs import StrInput
-from langflow.schema import Data
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import StrInput
+from langflow.schema.data import Data
from langflow.schema.dataframe import DataFrame
-from langflow.template import Output
+from langflow.template.field.base import Output
class CreateListComponent(Component):
diff --git a/src/backend/base/langflow/components/helpers/current_date.py b/src/backend/base/langflow/components/helpers/current_date.py
index 951176dd6..811103fda 100644
--- a/src/backend/base/langflow/components/helpers/current_date.py
+++ b/src/backend/base/langflow/components/helpers/current_date.py
@@ -3,7 +3,7 @@ from zoneinfo import ZoneInfo, available_timezones
from loguru import logger
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import DropdownInput, Output
from langflow.schema.message import Message
diff --git a/src/backend/base/langflow/components/helpers/id_generator.py b/src/backend/base/langflow/components/helpers/id_generator.py
index 1cfdb16a6..a2f9e251f 100644
--- a/src/backend/base/langflow/components/helpers/id_generator.py
+++ b/src/backend/base/langflow/components/helpers/id_generator.py
@@ -3,9 +3,9 @@ from typing import Any
from typing_extensions import override
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import MessageTextInput, Output
-from langflow.schema import dotdict
+from langflow.schema.dotdict import dotdict
from langflow.schema.message import Message
diff --git a/src/backend/base/langflow/components/helpers/memory.py b/src/backend/base/langflow/components/helpers/memory.py
index 1a223e5de..4c1b9d8d4 100644
--- a/src/backend/base/langflow/components/helpers/memory.py
+++ b/src/backend/base/langflow/components/helpers/memory.py
@@ -1,12 +1,13 @@
from typing import Any, cast
-from langflow.custom import Component
-from langflow.inputs import HandleInput
-from langflow.io import DropdownInput, IntInput, MessageTextInput, MultilineInput, Output, TabInput
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import DropdownInput, HandleInput, IntInput, MessageTextInput, MultilineInput, TabInput
from langflow.memory import aget_messages, astore_message
-from langflow.schema import Data, dotdict
+from langflow.schema.data import Data
from langflow.schema.dataframe import DataFrame
+from langflow.schema.dotdict import dotdict
from langflow.schema.message import Message
+from langflow.template.field.base import Output
from langflow.utils.component_utils import set_current_fields, set_field_display
from langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_AI, MESSAGE_SENDER_USER
diff --git a/src/backend/base/langflow/components/helpers/store_message.py b/src/backend/base/langflow/components/helpers/store_message.py
index 40c4dc574..c1db3da3d 100644
--- a/src/backend/base/langflow/components/helpers/store_message.py
+++ b/src/backend/base/langflow/components/helpers/store_message.py
@@ -1,9 +1,11 @@
-from langflow.custom import Component
-from langflow.inputs import HandleInput
-from langflow.inputs.inputs import MessageTextInput
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import (
+ HandleInput,
+ MessageTextInput,
+)
from langflow.memory import aget_messages, astore_message
from langflow.schema.message import Message
-from langflow.template import Output
+from langflow.template.field.base import Output
from langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_AI
diff --git a/src/backend/base/langflow/components/homeassistant/home_assistant_control.py b/src/backend/base/langflow/components/homeassistant/home_assistant_control.py
index 19c890e8d..b295c1368 100644
--- a/src/backend/base/langflow/components/homeassistant/home_assistant_control.py
+++ b/src/backend/base/langflow/components/homeassistant/home_assistant_control.py
@@ -7,8 +7,8 @@ from pydantic import BaseModel, Field
from langflow.base.langchain_utilities.model import LCToolComponent
from langflow.field_typing import Tool
-from langflow.inputs import SecretStrInput, StrInput
-from langflow.schema import Data
+from langflow.inputs.inputs import SecretStrInput, StrInput
+from langflow.schema.data import Data
class HomeAssistantControl(LCToolComponent):
diff --git a/src/backend/base/langflow/components/homeassistant/list_home_assistant_states.py b/src/backend/base/langflow/components/homeassistant/list_home_assistant_states.py
index 00e4a9c28..048db2d18 100644
--- a/src/backend/base/langflow/components/homeassistant/list_home_assistant_states.py
+++ b/src/backend/base/langflow/components/homeassistant/list_home_assistant_states.py
@@ -7,8 +7,8 @@ from pydantic import BaseModel, Field
from langflow.base.langchain_utilities.model import LCToolComponent
from langflow.field_typing import Tool
-from langflow.inputs import SecretStrInput, StrInput
-from langflow.schema import Data
+from langflow.inputs.inputs import SecretStrInput, StrInput
+from langflow.schema.data import Data
class ListHomeAssistantStates(LCToolComponent):
diff --git a/src/backend/base/langflow/components/icosacomputing/combinatorial_reasoner.py b/src/backend/base/langflow/components/icosacomputing/combinatorial_reasoner.py
index 22b2e782d..3ab494d9f 100644
--- a/src/backend/base/langflow/components/icosacomputing/combinatorial_reasoner.py
+++ b/src/backend/base/langflow/components/icosacomputing/combinatorial_reasoner.py
@@ -2,10 +2,10 @@ import requests
from requests.auth import HTTPBasicAuth
from langflow.base.models.openai_constants import OPENAI_MODEL_NAMES
-from langflow.custom import Component
-from langflow.inputs import DropdownInput, SecretStrInput, StrInput
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import DropdownInput, SecretStrInput, StrInput
from langflow.io import MessageTextInput, Output
-from langflow.schema import Data
+from langflow.schema.data import Data
from langflow.schema.message import Message
diff --git a/src/backend/base/langflow/components/input_output/chat.py b/src/backend/base/langflow/components/input_output/chat.py
index 7c29fb346..2409d998a 100644
--- a/src/backend/base/langflow/components/input_output/chat.py
+++ b/src/backend/base/langflow/components/input_output/chat.py
@@ -1,6 +1,6 @@
from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES
from langflow.base.io.chat import ChatComponent
-from langflow.inputs import BoolInput
+from langflow.inputs.inputs import BoolInput
from langflow.io import (
DropdownInput,
FileInput,
diff --git a/src/backend/base/langflow/components/input_output/chat_output.py b/src/backend/base/langflow/components/input_output/chat_output.py
index 04970e9dc..e1092a9cc 100644
--- a/src/backend/base/langflow/components/input_output/chat_output.py
+++ b/src/backend/base/langflow/components/input_output/chat_output.py
@@ -6,13 +6,12 @@ from fastapi.encoders import jsonable_encoder
from langflow.base.io.chat import ChatComponent
from langflow.helpers.data import safe_convert
-from langflow.inputs import BoolInput
-from langflow.inputs.inputs import HandleInput
-from langflow.io import DropdownInput, MessageTextInput, Output
+from langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput
from langflow.schema.data import Data
from langflow.schema.dataframe import DataFrame
from langflow.schema.message import Message
from langflow.schema.properties import Source
+from langflow.template.field.base import Output
from langflow.utils.constants import (
MESSAGE_SENDER_AI,
MESSAGE_SENDER_NAME_AI,
diff --git a/src/backend/base/langflow/components/langchain_utilities/character.py b/src/backend/base/langflow/components/langchain_utilities/character.py
index 92dacd519..2f075ebc8 100644
--- a/src/backend/base/langflow/components/langchain_utilities/character.py
+++ b/src/backend/base/langflow/components/langchain_utilities/character.py
@@ -3,7 +3,7 @@ from typing import Any
from langchain_text_splitters import CharacterTextSplitter, TextSplitter
from langflow.base.textsplitters.model import LCTextSplitterComponent
-from langflow.inputs import DataInput, IntInput, MessageTextInput
+from langflow.inputs.inputs import DataInput, IntInput, MessageTextInput
from langflow.utils.util import unescape_string
diff --git a/src/backend/base/langflow/components/langchain_utilities/conversation.py b/src/backend/base/langflow/components/langchain_utilities/conversation.py
index 6e9cbaa60..4542118a0 100644
--- a/src/backend/base/langflow/components/langchain_utilities/conversation.py
+++ b/src/backend/base/langflow/components/langchain_utilities/conversation.py
@@ -2,7 +2,7 @@ from langchain.chains import ConversationChain
from langflow.base.chains.model import LCChainComponent
from langflow.field_typing import Message
-from langflow.inputs import HandleInput, MultilineInput
+from langflow.inputs.inputs import HandleInput, MultilineInput
class ConversationChainComponent(LCChainComponent):
diff --git a/src/backend/base/langflow/components/langchain_utilities/csv_agent.py b/src/backend/base/langflow/components/langchain_utilities/csv_agent.py
index b85a66ca2..757829692 100644
--- a/src/backend/base/langflow/components/langchain_utilities/csv_agent.py
+++ b/src/backend/base/langflow/components/langchain_utilities/csv_agent.py
@@ -2,8 +2,13 @@ from langchain_experimental.agents.agent_toolkits.csv.base import create_csv_age
from langflow.base.agents.agent import LCAgentComponent
from langflow.field_typing import AgentExecutor
-from langflow.inputs import DropdownInput, FileInput, HandleInput
-from langflow.inputs.inputs import DictInput, MessageTextInput
+from langflow.inputs.inputs import (
+ DictInput,
+ DropdownInput,
+ FileInput,
+ HandleInput,
+ MessageTextInput,
+)
from langflow.schema.message import Message
from langflow.template.field.base import Output
diff --git a/src/backend/base/langflow/components/langchain_utilities/html_link_extractor.py b/src/backend/base/langflow/components/langchain_utilities/html_link_extractor.py
index 0d3e654ab..5a06b9c9b 100644
--- a/src/backend/base/langflow/components/langchain_utilities/html_link_extractor.py
+++ b/src/backend/base/langflow/components/langchain_utilities/html_link_extractor.py
@@ -4,7 +4,7 @@ from langchain_community.graph_vectorstores.extractors import HtmlLinkExtractor,
from langchain_core.documents import BaseDocumentTransformer
from langflow.base.document_transformers.model import LCDocumentTransformerComponent
-from langflow.inputs import BoolInput, DataInput, StrInput
+from langflow.inputs.inputs import BoolInput, DataInput, StrInput
class HtmlLinkExtractorComponent(LCDocumentTransformerComponent):
diff --git a/src/backend/base/langflow/components/langchain_utilities/json_agent.py b/src/backend/base/langflow/components/langchain_utilities/json_agent.py
index e241ac5fc..102266019 100644
--- a/src/backend/base/langflow/components/langchain_utilities/json_agent.py
+++ b/src/backend/base/langflow/components/langchain_utilities/json_agent.py
@@ -7,7 +7,7 @@ from langchain_community.agent_toolkits.json.toolkit import JsonToolkit
from langchain_community.tools.json.tool import JsonSpec
from langflow.base.agents.agent import LCAgentComponent
-from langflow.inputs import FileInput, HandleInput
+from langflow.inputs.inputs import FileInput, HandleInput
class JsonAgentComponent(LCAgentComponent):
diff --git a/src/backend/base/langflow/components/langchain_utilities/json_document_builder.py b/src/backend/base/langflow/components/langchain_utilities/json_document_builder.py
index 402a65182..9f3fbf197 100644
--- a/src/backend/base/langflow/components/langchain_utilities/json_document_builder.py
+++ b/src/backend/base/langflow/components/langchain_utilities/json_document_builder.py
@@ -13,7 +13,7 @@
from langchain_core.documents import Document
-from langflow.custom import CustomComponent
+from langflow.custom.custom_component.custom_component import CustomComponent
from langflow.services.database.models.base import orjson_dumps
diff --git a/src/backend/base/langflow/components/langchain_utilities/langchain_hub.py b/src/backend/base/langflow/components/langchain_utilities/langchain_hub.py
index 593afd369..64ebca355 100644
--- a/src/backend/base/langflow/components/langchain_utilities/langchain_hub.py
+++ b/src/backend/base/langflow/components/langchain_utilities/langchain_hub.py
@@ -2,8 +2,8 @@ import re
from langchain_core.prompts import HumanMessagePromptTemplate
-from langflow.custom import Component
-from langflow.inputs import DefaultPromptField, SecretStrInput, StrInput
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import DefaultPromptField, SecretStrInput, StrInput
from langflow.io import Output
from langflow.schema.message import Message
diff --git a/src/backend/base/langflow/components/langchain_utilities/language_recursive.py b/src/backend/base/langflow/components/langchain_utilities/language_recursive.py
index 66f909e96..705b6a6e3 100644
--- a/src/backend/base/langflow/components/langchain_utilities/language_recursive.py
+++ b/src/backend/base/langflow/components/langchain_utilities/language_recursive.py
@@ -3,7 +3,7 @@ from typing import Any
from langchain_text_splitters import Language, RecursiveCharacterTextSplitter, TextSplitter
from langflow.base.textsplitters.model import LCTextSplitterComponent
-from langflow.inputs import DataInput, DropdownInput, IntInput
+from langflow.inputs.inputs import DataInput, DropdownInput, IntInput
class LanguageRecursiveTextSplitterComponent(LCTextSplitterComponent):
diff --git a/src/backend/base/langflow/components/langchain_utilities/language_semantic.py b/src/backend/base/langflow/components/langchain_utilities/language_semantic.py
index 6edad7d1c..322fb742b 100644
--- a/src/backend/base/langflow/components/langchain_utilities/language_semantic.py
+++ b/src/backend/base/langflow/components/langchain_utilities/language_semantic.py
@@ -10,7 +10,7 @@ from langflow.io import (
MessageTextInput,
Output,
)
-from langflow.schema import Data
+from langflow.schema.data import Data
class SemanticTextSplitterComponent(LCTextSplitterComponent):
diff --git a/src/backend/base/langflow/components/langchain_utilities/llm_checker.py b/src/backend/base/langflow/components/langchain_utilities/llm_checker.py
index b9ba6a93d..54bae37ef 100644
--- a/src/backend/base/langflow/components/langchain_utilities/llm_checker.py
+++ b/src/backend/base/langflow/components/langchain_utilities/llm_checker.py
@@ -2,7 +2,7 @@ from langchain.chains import LLMCheckerChain
from langflow.base.chains.model import LCChainComponent
from langflow.field_typing import Message
-from langflow.inputs import HandleInput, MultilineInput
+from langflow.inputs.inputs import HandleInput, MultilineInput
class LLMCheckerChainComponent(LCChainComponent):
diff --git a/src/backend/base/langflow/components/langchain_utilities/llm_math.py b/src/backend/base/langflow/components/langchain_utilities/llm_math.py
index 824cf685e..6892a0714 100644
--- a/src/backend/base/langflow/components/langchain_utilities/llm_math.py
+++ b/src/backend/base/langflow/components/langchain_utilities/llm_math.py
@@ -2,8 +2,8 @@ from langchain.chains import LLMMathChain
from langflow.base.chains.model import LCChainComponent
from langflow.field_typing import Message
-from langflow.inputs import HandleInput, MultilineInput
-from langflow.template import Output
+from langflow.inputs.inputs import HandleInput, MultilineInput
+from langflow.template.field.base import Output
class LLMMathChainComponent(LCChainComponent):
diff --git a/src/backend/base/langflow/components/langchain_utilities/natural_language.py b/src/backend/base/langflow/components/langchain_utilities/natural_language.py
index f6e558d9c..633f2758b 100644
--- a/src/backend/base/langflow/components/langchain_utilities/natural_language.py
+++ b/src/backend/base/langflow/components/langchain_utilities/natural_language.py
@@ -3,7 +3,7 @@ from typing import Any
from langchain_text_splitters import NLTKTextSplitter, TextSplitter
from langflow.base.textsplitters.model import LCTextSplitterComponent
-from langflow.inputs import DataInput, IntInput, MessageTextInput
+from langflow.inputs.inputs import DataInput, IntInput, MessageTextInput
from langflow.utils.util import unescape_string
diff --git a/src/backend/base/langflow/components/langchain_utilities/openai_tools.py b/src/backend/base/langflow/components/langchain_utilities/openai_tools.py
index c529330c5..a5590dcd9 100644
--- a/src/backend/base/langflow/components/langchain_utilities/openai_tools.py
+++ b/src/backend/base/langflow/components/langchain_utilities/openai_tools.py
@@ -2,9 +2,12 @@ from langchain.agents import create_openai_tools_agent
from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, PromptTemplate
from langflow.base.agents.agent import LCToolsAgentComponent
-from langflow.inputs import MultilineInput
-from langflow.inputs.inputs import DataInput, HandleInput
-from langflow.schema import Data
+from langflow.inputs.inputs import (
+ DataInput,
+ HandleInput,
+ MultilineInput,
+)
+from langflow.schema.data import Data
class OpenAIToolsAgentComponent(LCToolsAgentComponent):
diff --git a/src/backend/base/langflow/components/langchain_utilities/openapi.py b/src/backend/base/langflow/components/langchain_utilities/openapi.py
index 3e662e7d5..384432620 100644
--- a/src/backend/base/langflow/components/langchain_utilities/openapi.py
+++ b/src/backend/base/langflow/components/langchain_utilities/openapi.py
@@ -8,7 +8,7 @@ from langchain_community.tools.json.tool import JsonSpec
from langchain_community.utilities.requests import TextRequestsWrapper
from langflow.base.agents.agent import LCAgentComponent
-from langflow.inputs import BoolInput, FileInput, HandleInput
+from langflow.inputs.inputs import BoolInput, FileInput, HandleInput
class OpenAPIAgentComponent(LCAgentComponent):
diff --git a/src/backend/base/langflow/components/langchain_utilities/retrieval_qa.py b/src/backend/base/langflow/components/langchain_utilities/retrieval_qa.py
index deebd7b54..66fa77478 100644
--- a/src/backend/base/langflow/components/langchain_utilities/retrieval_qa.py
+++ b/src/backend/base/langflow/components/langchain_utilities/retrieval_qa.py
@@ -2,7 +2,7 @@ from langchain.chains import RetrievalQA
from langflow.base.chains.model import LCChainComponent
from langflow.field_typing import Message
-from langflow.inputs import BoolInput, DropdownInput, HandleInput, MultilineInput
+from langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MultilineInput
class RetrievalQAComponent(LCChainComponent):
diff --git a/src/backend/base/langflow/components/langchain_utilities/retriever.py b/src/backend/base/langflow/components/langchain_utilities/retriever.py
index c016ec0df..40f0f2beb 100644
--- a/src/backend/base/langflow/components/langchain_utilities/retriever.py
+++ b/src/backend/base/langflow/components/langchain_utilities/retriever.py
@@ -1,6 +1,6 @@
from langchain_core.tools import create_retriever_tool
-from langflow.custom import CustomComponent
+from langflow.custom.custom_component.custom_component import CustomComponent
from langflow.field_typing import BaseRetriever, Tool
diff --git a/src/backend/base/langflow/components/langchain_utilities/runnable_executor.py b/src/backend/base/langflow/components/langchain_utilities/runnable_executor.py
index dccb03f01..dee616ecb 100644
--- a/src/backend/base/langflow/components/langchain_utilities/runnable_executor.py
+++ b/src/backend/base/langflow/components/langchain_utilities/runnable_executor.py
@@ -1,9 +1,9 @@
from langchain.agents import AgentExecutor
-from langflow.custom import Component
-from langflow.inputs import BoolInput, HandleInput, MessageTextInput
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import BoolInput, HandleInput, MessageTextInput
from langflow.schema.message import Message
-from langflow.template import Output
+from langflow.template.field.base import Output
class RunnableExecComponent(Component):
diff --git a/src/backend/base/langflow/components/langchain_utilities/self_query.py b/src/backend/base/langflow/components/langchain_utilities/self_query.py
index 0f3aceeb1..6f7bfc002 100644
--- a/src/backend/base/langflow/components/langchain_utilities/self_query.py
+++ b/src/backend/base/langflow/components/langchain_utilities/self_query.py
@@ -1,10 +1,10 @@
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.retrievers.self_query.base import SelfQueryRetriever
-from langflow.custom import Component
-from langflow.inputs import HandleInput, MessageTextInput
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import HandleInput, MessageTextInput
from langflow.io import Output
-from langflow.schema import Data
+from langflow.schema.data import Data
from langflow.schema.message import Message
diff --git a/src/backend/base/langflow/components/langchain_utilities/spider.py b/src/backend/base/langflow/components/langchain_utilities/spider.py
index 30c2aba32..3d615f9a1 100644
--- a/src/backend/base/langflow/components/langchain_utilities/spider.py
+++ b/src/backend/base/langflow/components/langchain_utilities/spider.py
@@ -1,7 +1,7 @@
from spider.spider import Spider
from langflow.base.langchain_utilities.spider_constants import MODES
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import (
BoolInput,
DictInput,
@@ -11,7 +11,7 @@ from langflow.io import (
SecretStrInput,
StrInput,
)
-from langflow.schema import Data
+from langflow.schema.data import Data
class SpiderTool(Component):
diff --git a/src/backend/base/langflow/components/langchain_utilities/sql.py b/src/backend/base/langflow/components/langchain_utilities/sql.py
index 06916e252..5edfd77d6 100644
--- a/src/backend/base/langflow/components/langchain_utilities/sql.py
+++ b/src/backend/base/langflow/components/langchain_utilities/sql.py
@@ -4,7 +4,7 @@ from langchain_community.agent_toolkits.sql.base import create_sql_agent
from langchain_community.utilities import SQLDatabase
from langflow.base.agents.agent import LCAgentComponent
-from langflow.inputs import HandleInput, MessageTextInput
+from langflow.inputs.inputs import HandleInput, MessageTextInput
class SQLAgentComponent(LCAgentComponent):
diff --git a/src/backend/base/langflow/components/langchain_utilities/sql_database.py b/src/backend/base/langflow/components/langchain_utilities/sql_database.py
index 9475581bf..2f2c042a3 100644
--- a/src/backend/base/langflow/components/langchain_utilities/sql_database.py
+++ b/src/backend/base/langflow/components/langchain_utilities/sql_database.py
@@ -2,7 +2,7 @@ from langchain_community.utilities.sql_database import SQLDatabase
from sqlalchemy import create_engine
from sqlalchemy.pool import StaticPool
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import (
Output,
StrInput,
diff --git a/src/backend/base/langflow/components/langchain_utilities/sql_generator.py b/src/backend/base/langflow/components/langchain_utilities/sql_generator.py
index eb0cc841a..31f3112a0 100644
--- a/src/backend/base/langflow/components/langchain_utilities/sql_generator.py
+++ b/src/backend/base/langflow/components/langchain_utilities/sql_generator.py
@@ -5,8 +5,8 @@ from langchain_core.prompts import PromptTemplate
from langflow.base.chains.model import LCChainComponent
from langflow.field_typing import Message
-from langflow.inputs import HandleInput, IntInput, MultilineInput
-from langflow.template import Output
+from langflow.inputs.inputs import HandleInput, IntInput, MultilineInput
+from langflow.template.field.base import Output
if TYPE_CHECKING:
from langchain_core.runnables import Runnable
diff --git a/src/backend/base/langflow/components/langchain_utilities/tool_calling.py b/src/backend/base/langflow/components/langchain_utilities/tool_calling.py
index 0f3ce7d6c..055cd0738 100644
--- a/src/backend/base/langflow/components/langchain_utilities/tool_calling.py
+++ b/src/backend/base/langflow/components/langchain_utilities/tool_calling.py
@@ -2,9 +2,12 @@ from langchain.agents import create_tool_calling_agent
from langchain_core.prompts import ChatPromptTemplate
from langflow.base.agents.agent import LCToolsAgentComponent
-from langflow.inputs import MessageTextInput
-from langflow.inputs.inputs import DataInput, HandleInput
-from langflow.schema import Data
+from langflow.inputs.inputs import (
+ DataInput,
+ HandleInput,
+ MessageTextInput,
+)
+from langflow.schema.data import Data
class ToolCallingAgentComponent(LCToolsAgentComponent):
diff --git a/src/backend/base/langflow/components/langchain_utilities/vector_store.py b/src/backend/base/langflow/components/langchain_utilities/vector_store.py
index dfd56deb0..a4f676df7 100644
--- a/src/backend/base/langflow/components/langchain_utilities/vector_store.py
+++ b/src/backend/base/langflow/components/langchain_utilities/vector_store.py
@@ -1,6 +1,6 @@
from langchain_core.vectorstores import VectorStoreRetriever
-from langflow.custom import CustomComponent
+from langflow.custom.custom_component.custom_component import CustomComponent
from langflow.field_typing import VectorStore
diff --git a/src/backend/base/langflow/components/langchain_utilities/vector_store_info.py b/src/backend/base/langflow/components/langchain_utilities/vector_store_info.py
index 4658f05dc..d16b3a8c1 100644
--- a/src/backend/base/langflow/components/langchain_utilities/vector_store_info.py
+++ b/src/backend/base/langflow/components/langchain_utilities/vector_store_info.py
@@ -1,8 +1,8 @@
from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreInfo
-from langflow.custom import Component
-from langflow.inputs import HandleInput, MessageTextInput, MultilineInput
-from langflow.template import Output
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import HandleInput, MessageTextInput, MultilineInput
+from langflow.template.field.base import Output
class VectorStoreInfoComponent(Component):
diff --git a/src/backend/base/langflow/components/langchain_utilities/vector_store_router.py b/src/backend/base/langflow/components/langchain_utilities/vector_store_router.py
index 6c44c2d5c..8f49d7176 100644
--- a/src/backend/base/langflow/components/langchain_utilities/vector_store_router.py
+++ b/src/backend/base/langflow/components/langchain_utilities/vector_store_router.py
@@ -2,7 +2,7 @@ from langchain.agents import AgentExecutor, create_vectorstore_router_agent
from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreRouterToolkit
from langflow.base.agents.agent import LCAgentComponent
-from langflow.inputs import HandleInput
+from langflow.inputs.inputs import HandleInput
class VectorStoreRouterAgentComponent(LCAgentComponent):
diff --git a/src/backend/base/langflow/components/langchain_utilities/xml_agent.py b/src/backend/base/langflow/components/langchain_utilities/xml_agent.py
index 5e31b4d13..a501f19d6 100644
--- a/src/backend/base/langflow/components/langchain_utilities/xml_agent.py
+++ b/src/backend/base/langflow/components/langchain_utilities/xml_agent.py
@@ -2,9 +2,12 @@ from langchain.agents import create_xml_agent
from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, PromptTemplate
from langflow.base.agents.agent import LCToolsAgentComponent
-from langflow.inputs import MultilineInput
-from langflow.inputs.inputs import DataInput, HandleInput
-from langflow.schema import Data
+from langflow.inputs.inputs import (
+ DataInput,
+ HandleInput,
+ MultilineInput,
+)
+from langflow.schema.data import Data
class XMLAgentComponent(LCToolsAgentComponent):
diff --git a/src/backend/base/langflow/components/languagemodels/aiml.py b/src/backend/base/langflow/components/languagemodels/aiml.py
index dd0613b6a..941cb22f3 100644
--- a/src/backend/base/langflow/components/languagemodels/aiml.py
+++ b/src/backend/base/langflow/components/languagemodels/aiml.py
@@ -6,7 +6,7 @@ from langflow.base.models.aiml_constants import AimlModels
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
from langflow.field_typing.range_spec import RangeSpec
-from langflow.inputs import (
+from langflow.inputs.inputs import (
DictInput,
DropdownInput,
IntInput,
diff --git a/src/backend/base/langflow/components/languagemodels/azure_openai.py b/src/backend/base/langflow/components/languagemodels/azure_openai.py
index cc6fa2a0b..c44aed57a 100644
--- a/src/backend/base/langflow/components/languagemodels/azure_openai.py
+++ b/src/backend/base/langflow/components/languagemodels/azure_openai.py
@@ -3,7 +3,7 @@ from langchain_openai import AzureChatOpenAI
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
from langflow.field_typing.range_spec import RangeSpec
-from langflow.inputs import MessageTextInput
+from langflow.inputs.inputs import MessageTextInput
from langflow.io import DropdownInput, IntInput, SecretStrInput, SliderInput
diff --git a/src/backend/base/langflow/components/languagemodels/deepseek.py b/src/backend/base/langflow/components/languagemodels/deepseek.py
index a2253d1ee..0e56fedb8 100644
--- a/src/backend/base/langflow/components/languagemodels/deepseek.py
+++ b/src/backend/base/langflow/components/languagemodels/deepseek.py
@@ -5,7 +5,7 @@ from typing_extensions import override
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
from langflow.field_typing.range_spec import RangeSpec
-from langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput
+from langflow.inputs.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput
DEEPSEEK_MODELS = ["deepseek-chat"]
diff --git a/src/backend/base/langflow/components/languagemodels/google_generative_ai.py b/src/backend/base/langflow/components/languagemodels/google_generative_ai.py
index b8a6e59e6..543428fd9 100644
--- a/src/backend/base/langflow/components/languagemodels/google_generative_ai.py
+++ b/src/backend/base/langflow/components/languagemodels/google_generative_ai.py
@@ -8,9 +8,15 @@ from langflow.base.models.google_generative_ai_constants import GOOGLE_GENERATIV
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
from langflow.field_typing.range_spec import RangeSpec
-from langflow.inputs import DropdownInput, FloatInput, IntInput, SecretStrInput, SliderInput
-from langflow.inputs.inputs import BoolInput
-from langflow.schema import dotdict
+from langflow.inputs.inputs import (
+ BoolInput,
+ DropdownInput,
+ FloatInput,
+ IntInput,
+ SecretStrInput,
+ SliderInput,
+)
+from langflow.schema.dotdict import dotdict
class GoogleGenerativeAIComponent(LCModelComponent):
diff --git a/src/backend/base/langflow/components/languagemodels/lmstudiomodel.py b/src/backend/base/langflow/components/languagemodels/lmstudiomodel.py
index 1680699c2..92a04a670 100644
--- a/src/backend/base/langflow/components/languagemodels/lmstudiomodel.py
+++ b/src/backend/base/langflow/components/languagemodels/lmstudiomodel.py
@@ -8,7 +8,7 @@ from typing_extensions import override
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
from langflow.field_typing.range_spec import RangeSpec
-from langflow.inputs import DictInput, DropdownInput, FloatInput, IntInput, SecretStrInput, StrInput
+from langflow.inputs.inputs import DictInput, DropdownInput, FloatInput, IntInput, SecretStrInput, StrInput
class LMStudioModelComponent(LCModelComponent):
diff --git a/src/backend/base/langflow/components/languagemodels/maritalk.py b/src/backend/base/langflow/components/languagemodels/maritalk.py
index b2f386e10..094f5f537 100644
--- a/src/backend/base/langflow/components/languagemodels/maritalk.py
+++ b/src/backend/base/langflow/components/languagemodels/maritalk.py
@@ -3,7 +3,7 @@ from langchain_community.chat_models import ChatMaritalk
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
from langflow.field_typing.range_spec import RangeSpec
-from langflow.inputs import DropdownInput, FloatInput, IntInput, SecretStrInput
+from langflow.inputs.inputs import DropdownInput, FloatInput, IntInput, SecretStrInput
class MaritalkModelComponent(LCModelComponent):
diff --git a/src/backend/base/langflow/components/languagemodels/novita.py b/src/backend/base/langflow/components/languagemodels/novita.py
index 4dcd527d3..76fdaca15 100644
--- a/src/backend/base/langflow/components/languagemodels/novita.py
+++ b/src/backend/base/langflow/components/languagemodels/novita.py
@@ -7,15 +7,15 @@ from langflow.base.models.model import LCModelComponent
from langflow.base.models.novita_constants import MODEL_NAMES
from langflow.field_typing import LanguageModel
from langflow.field_typing.range_spec import RangeSpec
-from langflow.inputs import (
+from langflow.inputs.inputs import (
BoolInput,
DictInput,
DropdownInput,
+ HandleInput,
IntInput,
SecretStrInput,
SliderInput,
)
-from langflow.inputs.inputs import HandleInput
class NovitaModelComponent(LCModelComponent):
diff --git a/src/backend/base/langflow/components/languagemodels/nvidia.py b/src/backend/base/langflow/components/languagemodels/nvidia.py
index ec4878e7d..ea812dcf3 100644
--- a/src/backend/base/langflow/components/languagemodels/nvidia.py
+++ b/src/backend/base/langflow/components/languagemodels/nvidia.py
@@ -7,7 +7,7 @@ from urllib3.exceptions import MaxRetryError, NameResolutionError
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
from langflow.field_typing.range_spec import RangeSpec
-from langflow.inputs import BoolInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput, SliderInput
+from langflow.inputs.inputs import BoolInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput, SliderInput
from langflow.schema.dotdict import dotdict
diff --git a/src/backend/base/langflow/components/languagemodels/openai_chat_model.py b/src/backend/base/langflow/components/languagemodels/openai_chat_model.py
index 34656e32d..e79e3b4d8 100644
--- a/src/backend/base/langflow/components/languagemodels/openai_chat_model.py
+++ b/src/backend/base/langflow/components/languagemodels/openai_chat_model.py
@@ -10,7 +10,7 @@ from langflow.base.models.openai_constants import (
)
from langflow.field_typing import LanguageModel
from langflow.field_typing.range_spec import RangeSpec
-from langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput
+from langflow.inputs.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput
from langflow.logging import logger
diff --git a/src/backend/base/langflow/components/languagemodels/openrouter.py b/src/backend/base/langflow/components/languagemodels/openrouter.py
index c0292cf68..c41afd576 100644
--- a/src/backend/base/langflow/components/languagemodels/openrouter.py
+++ b/src/backend/base/langflow/components/languagemodels/openrouter.py
@@ -8,7 +8,7 @@ from pydantic.v1 import SecretStr
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
from langflow.field_typing.range_spec import RangeSpec
-from langflow.inputs import (
+from langflow.inputs.inputs import (
DropdownInput,
IntInput,
SecretStrInput,
diff --git a/src/backend/base/langflow/components/languagemodels/vertexai.py b/src/backend/base/langflow/components/languagemodels/vertexai.py
index 20e616177..000d2ae8d 100644
--- a/src/backend/base/langflow/components/languagemodels/vertexai.py
+++ b/src/backend/base/langflow/components/languagemodels/vertexai.py
@@ -2,7 +2,7 @@ from typing import cast
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
-from langflow.inputs import MessageTextInput
+from langflow.inputs.inputs import MessageTextInput
from langflow.io import BoolInput, FileInput, FloatInput, IntInput, StrInput
diff --git a/src/backend/base/langflow/components/languagemodels/watsonx.py b/src/backend/base/langflow/components/languagemodels/watsonx.py
index c8534c7fb..87bcd51d7 100644
--- a/src/backend/base/langflow/components/languagemodels/watsonx.py
+++ b/src/backend/base/langflow/components/languagemodels/watsonx.py
@@ -9,7 +9,7 @@ from pydantic.v1 import SecretStr
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
from langflow.field_typing.range_spec import RangeSpec
-from langflow.inputs import BoolInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput
+from langflow.inputs.inputs import BoolInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput
from langflow.schema.dotdict import dotdict
diff --git a/src/backend/base/langflow/components/languagemodels/xai.py b/src/backend/base/langflow/components/languagemodels/xai.py
index 81bf3892b..f9e3ff8d2 100644
--- a/src/backend/base/langflow/components/languagemodels/xai.py
+++ b/src/backend/base/langflow/components/languagemodels/xai.py
@@ -6,7 +6,15 @@ from typing_extensions import override
from langflow.base.models.model import LCModelComponent
from langflow.field_typing import LanguageModel
from langflow.field_typing.range_spec import RangeSpec
-from langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput, SliderInput
+from langflow.inputs.inputs import (
+ BoolInput,
+ DictInput,
+ DropdownInput,
+ IntInput,
+ MessageTextInput,
+ SecretStrInput,
+ SliderInput,
+)
XAI_DEFAULT_MODELS = ["grok-2-latest"]
diff --git a/src/backend/base/langflow/components/langwatch/langwatch.py b/src/backend/base/langflow/components/langwatch/langwatch.py
index a8668fc3d..814c01159 100644
--- a/src/backend/base/langflow/components/langwatch/langwatch.py
+++ b/src/backend/base/langflow/components/langwatch/langwatch.py
@@ -5,7 +5,7 @@ from typing import Any
import httpx
from loguru import logger
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.inputs.inputs import MultilineInput
from langflow.io import (
BoolInput,
@@ -17,7 +17,7 @@ from langflow.io import (
Output,
SecretStrInput,
)
-from langflow.schema import Data
+from langflow.schema.data import Data
from langflow.schema.dotdict import dotdict
diff --git a/src/backend/base/langflow/components/logic/conditional_router.py b/src/backend/base/langflow/components/logic/conditional_router.py
index 3649572f6..9caf46ebe 100644
--- a/src/backend/base/langflow/components/logic/conditional_router.py
+++ b/src/backend/base/langflow/components/logic/conditional_router.py
@@ -1,6 +1,6 @@
import re
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import BoolInput, DropdownInput, IntInput, MessageInput, MessageTextInput, Output
from langflow.schema.message import Message
diff --git a/src/backend/base/langflow/components/logic/data_conditional_router.py b/src/backend/base/langflow/components/logic/data_conditional_router.py
index a107458bb..d9547c0d6 100644
--- a/src/backend/base/langflow/components/logic/data_conditional_router.py
+++ b/src/backend/base/langflow/components/logic/data_conditional_router.py
@@ -1,8 +1,9 @@
from typing import Any
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import DataInput, DropdownInput, MessageTextInput, Output
-from langflow.schema import Data, dotdict
+from langflow.schema.data import Data
+from langflow.schema.dotdict import dotdict
class DataConditionalRouterComponent(Component):
diff --git a/src/backend/base/langflow/components/logic/flow_tool.py b/src/backend/base/langflow/components/logic/flow_tool.py
index a7ed4d19a..b80cbf514 100644
--- a/src/backend/base/langflow/components/logic/flow_tool.py
+++ b/src/backend/base/langflow/components/logic/flow_tool.py
@@ -9,7 +9,7 @@ from langflow.field_typing import Tool
from langflow.graph.graph.base import Graph
from langflow.helpers.flow import get_flow_inputs
from langflow.io import BoolInput, DropdownInput, Output, StrInput
-from langflow.schema import Data
+from langflow.schema.data import Data
from langflow.schema.dotdict import dotdict
diff --git a/src/backend/base/langflow/components/logic/listen.py b/src/backend/base/langflow/components/logic/listen.py
index e2e6afa48..f68cb45f2 100644
--- a/src/backend/base/langflow/components/logic/listen.py
+++ b/src/backend/base/langflow/components/logic/listen.py
@@ -1,5 +1,5 @@
-from langflow.custom import CustomComponent
-from langflow.schema import Data
+from langflow.custom.custom_component.custom_component import CustomComponent
+from langflow.schema.data import Data
class ListenComponent(CustomComponent):
diff --git a/src/backend/base/langflow/components/logic/loop.py b/src/backend/base/langflow/components/logic/loop.py
index 6ddbe09c7..b17daece6 100644
--- a/src/backend/base/langflow/components/logic/loop.py
+++ b/src/backend/base/langflow/components/logic/loop.py
@@ -1,7 +1,8 @@
-from langflow.custom import Component
-from langflow.io import HandleInput, Output
-from langflow.schema import Data
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import HandleInput
+from langflow.schema.data import Data
from langflow.schema.dataframe import DataFrame
+from langflow.template.field.base import Output
class LoopComponent(Component):
diff --git a/src/backend/base/langflow/components/logic/notify.py b/src/backend/base/langflow/components/logic/notify.py
index 165b145fd..2e0d842e7 100644
--- a/src/backend/base/langflow/components/logic/notify.py
+++ b/src/backend/base/langflow/components/logic/notify.py
@@ -1,5 +1,5 @@
-from langflow.custom import CustomComponent
-from langflow.schema import Data
+from langflow.custom.custom_component.custom_component import CustomComponent
+from langflow.schema.data import Data
class NotifyComponent(CustomComponent):
diff --git a/src/backend/base/langflow/components/logic/pass_message.py b/src/backend/base/langflow/components/logic/pass_message.py
index f443f7b37..a8b066519 100644
--- a/src/backend/base/langflow/components/logic/pass_message.py
+++ b/src/backend/base/langflow/components/logic/pass_message.py
@@ -1,7 +1,7 @@
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import MessageInput
from langflow.schema.message import Message
-from langflow.template import Output
+from langflow.template.field.base import Output
class PassMessageComponent(Component):
diff --git a/src/backend/base/langflow/components/logic/run_flow.py b/src/backend/base/langflow/components/logic/run_flow.py
index d96a4fc5a..27dfadd6b 100644
--- a/src/backend/base/langflow/components/logic/run_flow.py
+++ b/src/backend/base/langflow/components/logic/run_flow.py
@@ -4,7 +4,7 @@ from loguru import logger
from langflow.base.tools.run_flow import RunFlowBaseComponent
from langflow.helpers.flow import run_flow
-from langflow.schema import dotdict
+from langflow.schema.dotdict import dotdict
class RunFlowComponent(RunFlowBaseComponent):
diff --git a/src/backend/base/langflow/components/logic/sub_flow.py b/src/backend/base/langflow/components/logic/sub_flow.py
index c6ddd7946..30e4fec7b 100644
--- a/src/backend/base/langflow/components/logic/sub_flow.py
+++ b/src/backend/base/langflow/components/logic/sub_flow.py
@@ -3,12 +3,13 @@ from typing import Any
from loguru import logger
from langflow.base.flow_processing.utils import build_data_from_result_data
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.graph.graph.base import Graph
from langflow.graph.vertex.base import Vertex
from langflow.helpers.flow import get_flow_inputs
from langflow.io import DropdownInput, Output
-from langflow.schema import Data, dotdict
+from langflow.schema.data import Data
+from langflow.schema.dotdict import dotdict
class SubFlowComponent(Component):
diff --git a/src/backend/base/langflow/components/memories/astra_db.py b/src/backend/base/langflow/components/memories/astra_db.py
index 6792057bc..63d8e2064 100644
--- a/src/backend/base/langflow/components/memories/astra_db.py
+++ b/src/backend/base/langflow/components/memories/astra_db.py
@@ -4,7 +4,7 @@ from astrapy.admin import parse_api_endpoint
from langflow.base.memory.model import LCChatMemoryComponent
from langflow.field_typing.constants import Memory
-from langflow.inputs import MessageTextInput, SecretStrInput, StrInput
+from langflow.inputs.inputs import MessageTextInput, SecretStrInput, StrInput
class AstraDBChatMemory(LCChatMemoryComponent):
diff --git a/src/backend/base/langflow/components/memories/cassandra.py b/src/backend/base/langflow/components/memories/cassandra.py
index 0eb694d2e..b91ca77c5 100644
--- a/src/backend/base/langflow/components/memories/cassandra.py
+++ b/src/backend/base/langflow/components/memories/cassandra.py
@@ -1,6 +1,6 @@
from langflow.base.memory.model import LCChatMemoryComponent
from langflow.field_typing.constants import Memory
-from langflow.inputs import DictInput, MessageTextInput, SecretStrInput
+from langflow.inputs.inputs import DictInput, MessageTextInput, SecretStrInput
class CassandraChatMemory(LCChatMemoryComponent):
diff --git a/src/backend/base/langflow/components/memories/mem0_chat_memory.py b/src/backend/base/langflow/components/memories/mem0_chat_memory.py
index 9c3f437f7..8bf8e78bd 100644
--- a/src/backend/base/langflow/components/memories/mem0_chat_memory.py
+++ b/src/backend/base/langflow/components/memories/mem0_chat_memory.py
@@ -4,7 +4,7 @@ from loguru import logger
from mem0 import Memory, MemoryClient
from langflow.base.memory.model import LCChatMemoryComponent
-from langflow.inputs import (
+from langflow.inputs.inputs import (
DictInput,
HandleInput,
MessageTextInput,
@@ -12,7 +12,7 @@ from langflow.inputs import (
SecretStrInput,
)
from langflow.io import Output
-from langflow.schema import Data
+from langflow.schema.data import Data
class Mem0MemoryComponent(LCChatMemoryComponent):
diff --git a/src/backend/base/langflow/components/memories/redis.py b/src/backend/base/langflow/components/memories/redis.py
index 0b0e64665..95b47da03 100644
--- a/src/backend/base/langflow/components/memories/redis.py
+++ b/src/backend/base/langflow/components/memories/redis.py
@@ -4,7 +4,7 @@ from langchain_community.chat_message_histories.redis import RedisChatMessageHis
from langflow.base.memory.model import LCChatMemoryComponent
from langflow.field_typing.constants import Memory
-from langflow.inputs import IntInput, MessageTextInput, SecretStrInput, StrInput
+from langflow.inputs.inputs import IntInput, MessageTextInput, SecretStrInput, StrInput
class RedisIndexChatMemory(LCChatMemoryComponent):
diff --git a/src/backend/base/langflow/components/memories/zep.py b/src/backend/base/langflow/components/memories/zep.py
index 6d0a0e379..27473a951 100644
--- a/src/backend/base/langflow/components/memories/zep.py
+++ b/src/backend/base/langflow/components/memories/zep.py
@@ -1,6 +1,6 @@
from langflow.base.memory.model import LCChatMemoryComponent
from langflow.field_typing.constants import Memory
-from langflow.inputs import DropdownInput, MessageTextInput, SecretStrInput
+from langflow.inputs.inputs import DropdownInput, MessageTextInput, SecretStrInput
class ZepChatMemory(LCChatMemoryComponent):
diff --git a/src/backend/base/langflow/components/nvidia/nvidia_ingest.py b/src/backend/base/langflow/components/nvidia/nvidia_ingest.py
index 7ee8f2227..e014b1098 100644
--- a/src/backend/base/langflow/components/nvidia/nvidia_ingest.py
+++ b/src/backend/base/langflow/components/nvidia/nvidia_ingest.py
@@ -3,16 +3,9 @@ from urllib.parse import urlparse
from pypdf import PdfReader
-from langflow.base.data import BaseFileComponent
-from langflow.io import (
- BoolInput,
- DropdownInput,
- FloatInput,
- IntInput,
- MessageTextInput,
- SecretStrInput,
-)
-from langflow.schema import Data
+from langflow.base.data.base_file import BaseFileComponent
+from langflow.inputs.inputs import BoolInput, DropdownInput, FloatInput, IntInput, MessageTextInput, SecretStrInput
+from langflow.schema.data import Data
class NvidiaIngestComponent(BaseFileComponent):
diff --git a/src/backend/base/langflow/components/olivya/olivya.py b/src/backend/base/langflow/components/olivya/olivya.py
index 1604df9be..aed19dd3b 100644
--- a/src/backend/base/langflow/components/olivya/olivya.py
+++ b/src/backend/base/langflow/components/olivya/olivya.py
@@ -3,9 +3,9 @@ import json
import httpx
from loguru import logger
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import MessageTextInput, Output
-from langflow.schema import Data
+from langflow.schema.data import Data
class OlivyaComponent(Component):
diff --git a/src/backend/base/langflow/components/processing/alter_metadata.py b/src/backend/base/langflow/components/processing/alter_metadata.py
index 7a6536bb1..5f158292a 100644
--- a/src/backend/base/langflow/components/processing/alter_metadata.py
+++ b/src/backend/base/langflow/components/processing/alter_metadata.py
@@ -1,7 +1,8 @@
-from langflow.custom import Component
-from langflow.inputs import MessageTextInput
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import MessageTextInput
from langflow.io import HandleInput, NestedDictInput, Output, StrInput
-from langflow.schema import Data, DataFrame
+from langflow.schema.data import Data
+from langflow.schema.dataframe import DataFrame
class AlterMetadataComponent(Component):
diff --git a/src/backend/base/langflow/components/processing/batch_run.py b/src/backend/base/langflow/components/processing/batch_run.py
index b54523bd6..ff60a0152 100644
--- a/src/backend/base/langflow/components/processing/batch_run.py
+++ b/src/backend/base/langflow/components/processing/batch_run.py
@@ -5,9 +5,9 @@ from typing import TYPE_CHECKING, Any, cast
import toml # type: ignore[import-untyped]
from loguru import logger
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import BoolInput, DataFrameInput, HandleInput, MessageTextInput, MultilineInput, Output
-from langflow.schema import DataFrame
+from langflow.schema.dataframe import DataFrame
if TYPE_CHECKING:
from langchain_core.runnables import Runnable
diff --git a/src/backend/base/langflow/components/processing/combine_text.py b/src/backend/base/langflow/components/processing/combine_text.py
index b68bcd200..9ac1fc524 100644
--- a/src/backend/base/langflow/components/processing/combine_text.py
+++ b/src/backend/base/langflow/components/processing/combine_text.py
@@ -1,4 +1,4 @@
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import MessageTextInput, Output
from langflow.schema.message import Message
diff --git a/src/backend/base/langflow/components/processing/create_data.py b/src/backend/base/langflow/components/processing/create_data.py
index ec528c01c..639f41278 100644
--- a/src/backend/base/langflow/components/processing/create_data.py
+++ b/src/backend/base/langflow/components/processing/create_data.py
@@ -1,10 +1,10 @@
from typing import Any
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.field_typing.range_spec import RangeSpec
from langflow.inputs.inputs import BoolInput, DictInput, IntInput, MessageTextInput
from langflow.io import Output
-from langflow.schema import Data
+from langflow.schema.data import Data
from langflow.schema.dotdict import dotdict
diff --git a/src/backend/base/langflow/components/processing/data_to_dataframe.py b/src/backend/base/langflow/components/processing/data_to_dataframe.py
index e7abc176d..1620f7b82 100644
--- a/src/backend/base/langflow/components/processing/data_to_dataframe.py
+++ b/src/backend/base/langflow/components/processing/data_to_dataframe.py
@@ -1,6 +1,7 @@
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import DataInput, Output
-from langflow.schema import Data, DataFrame
+from langflow.schema.data import Data
+from langflow.schema.dataframe import DataFrame
class DataToDataFrameComponent(Component):
diff --git a/src/backend/base/langflow/components/processing/dataframe_operations.py b/src/backend/base/langflow/components/processing/dataframe_operations.py
index f217d3770..b41fb9c14 100644
--- a/src/backend/base/langflow/components/processing/dataframe_operations.py
+++ b/src/backend/base/langflow/components/processing/dataframe_operations.py
@@ -1,6 +1,6 @@
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import BoolInput, DataFrameInput, DropdownInput, IntInput, MessageTextInput, Output, StrInput
-from langflow.schema import DataFrame
+from langflow.schema.dataframe import DataFrame
class DataFrameOperationsComponent(Component):
diff --git a/src/backend/base/langflow/components/processing/extract_key.py b/src/backend/base/langflow/components/processing/extract_key.py
index eaeb82b72..b9054cd64 100644
--- a/src/backend/base/langflow/components/processing/extract_key.py
+++ b/src/backend/base/langflow/components/processing/extract_key.py
@@ -1,6 +1,6 @@
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import DataInput, Output, StrInput
-from langflow.schema import Data
+from langflow.schema.data import Data
class ExtractDataKeyComponent(Component):
diff --git a/src/backend/base/langflow/components/processing/filter_data.py b/src/backend/base/langflow/components/processing/filter_data.py
index 4022ddde0..99a6213d6 100644
--- a/src/backend/base/langflow/components/processing/filter_data.py
+++ b/src/backend/base/langflow/components/processing/filter_data.py
@@ -1,6 +1,6 @@
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import DataInput, MessageTextInput, Output
-from langflow.schema import Data
+from langflow.schema.data import Data
class FilterDataComponent(Component):
diff --git a/src/backend/base/langflow/components/processing/filter_data_values.py b/src/backend/base/langflow/components/processing/filter_data_values.py
index df3675c29..c2aab6b4e 100644
--- a/src/backend/base/langflow/components/processing/filter_data_values.py
+++ b/src/backend/base/langflow/components/processing/filter_data_values.py
@@ -1,8 +1,8 @@
from typing import Any
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import DataInput, DropdownInput, MessageTextInput, Output
-from langflow.schema import Data
+from langflow.schema.data import Data
class DataFilterComponent(Component):
diff --git a/src/backend/base/langflow/components/processing/json_cleaner.py b/src/backend/base/langflow/components/processing/json_cleaner.py
index f7cebbb6c..d8b8290cf 100644
--- a/src/backend/base/langflow/components/processing/json_cleaner.py
+++ b/src/backend/base/langflow/components/processing/json_cleaner.py
@@ -1,10 +1,10 @@
import json
import unicodedata
-from langflow.custom import Component
-from langflow.inputs import BoolInput, MessageTextInput
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import BoolInput, MessageTextInput
from langflow.schema.message import Message
-from langflow.template import Output
+from langflow.template.field.base import Output
class JSONCleaner(Component):
diff --git a/src/backend/base/langflow/components/processing/lambda_filter.py b/src/backend/base/langflow/components/processing/lambda_filter.py
index ae050cc03..d2e52d534 100644
--- a/src/backend/base/langflow/components/processing/lambda_filter.py
+++ b/src/backend/base/langflow/components/processing/lambda_filter.py
@@ -4,9 +4,9 @@ import json
import re
from typing import TYPE_CHECKING, Any
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import DataInput, HandleInput, IntInput, MultilineInput, Output
-from langflow.schema import Data
+from langflow.schema.data import Data
from langflow.schema.dataframe import DataFrame
from langflow.utils.data_structure import get_data_structure
diff --git a/src/backend/base/langflow/components/processing/llm_router.py b/src/backend/base/langflow/components/processing/llm_router.py
index 413ef555d..0d007dabc 100644
--- a/src/backend/base/langflow/components/processing/llm_router.py
+++ b/src/backend/base/langflow/components/processing/llm_router.py
@@ -7,10 +7,11 @@ import aiohttp
from langflow.base.models.chat_result import get_chat_result
from langflow.base.models.model_utils import get_model_name
-from langflow.custom import Component
-from langflow.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, Output
-from langflow.schema import Data
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput
+from langflow.schema.data import Data
from langflow.schema.message import Message
+from langflow.template.field.base import Output
class LLMRouterComponent(Component):
diff --git a/src/backend/base/langflow/components/processing/merge_data.py b/src/backend/base/langflow/components/processing/merge_data.py
index 6d8fabd22..74f2b816c 100644
--- a/src/backend/base/langflow/components/processing/merge_data.py
+++ b/src/backend/base/langflow/components/processing/merge_data.py
@@ -3,9 +3,9 @@ from typing import cast
from loguru import logger
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import DataInput, DropdownInput, Output
-from langflow.schema import DataFrame
+from langflow.schema.dataframe import DataFrame
class DataOperation(str, Enum):
diff --git a/src/backend/base/langflow/components/processing/message_to_data.py b/src/backend/base/langflow/components/processing/message_to_data.py
index 6aee3e4b4..fe15dfd3e 100644
--- a/src/backend/base/langflow/components/processing/message_to_data.py
+++ b/src/backend/base/langflow/components/processing/message_to_data.py
@@ -1,8 +1,8 @@
from loguru import logger
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import MessageInput, Output
-from langflow.schema import Data
+from langflow.schema.data import Data
from langflow.schema.message import Message
diff --git a/src/backend/base/langflow/components/processing/parse_data.py b/src/backend/base/langflow/components/processing/parse_data.py
index ff376bdd1..2608a09b6 100644
--- a/src/backend/base/langflow/components/processing/parse_data.py
+++ b/src/backend/base/langflow/components/processing/parse_data.py
@@ -1,7 +1,7 @@
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.helpers.data import data_to_text, data_to_text_list
from langflow.io import DataInput, MultilineInput, Output, StrInput
-from langflow.schema import Data
+from langflow.schema.data import Data
from langflow.schema.message import Message
diff --git a/src/backend/base/langflow/components/processing/parse_dataframe.py b/src/backend/base/langflow/components/processing/parse_dataframe.py
index ec6cfdf12..ce1d8f076 100644
--- a/src/backend/base/langflow/components/processing/parse_dataframe.py
+++ b/src/backend/base/langflow/components/processing/parse_dataframe.py
@@ -1,4 +1,4 @@
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import DataFrameInput, MultilineInput, Output, StrInput
from langflow.schema.message import Message
diff --git a/src/backend/base/langflow/components/processing/parse_json_data.py b/src/backend/base/langflow/components/processing/parse_json_data.py
index 0881b0b94..7180f0898 100644
--- a/src/backend/base/langflow/components/processing/parse_json_data.py
+++ b/src/backend/base/langflow/components/processing/parse_json_data.py
@@ -5,10 +5,10 @@ import jq
from json_repair import repair_json
from loguru import logger
-from langflow.custom import Component
-from langflow.inputs import HandleInput, MessageTextInput
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import HandleInput, MessageTextInput
from langflow.io import Output
-from langflow.schema import Data
+from langflow.schema.data import Data
from langflow.schema.message import Message
diff --git a/src/backend/base/langflow/components/processing/parser.py b/src/backend/base/langflow/components/processing/parser.py
index 7ea3d298e..2c7392246 100644
--- a/src/backend/base/langflow/components/processing/parser.py
+++ b/src/backend/base/langflow/components/processing/parser.py
@@ -1,15 +1,10 @@
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.helpers.data import safe_convert
-from langflow.io import (
- BoolInput,
- HandleInput,
- MessageTextInput,
- MultilineInput,
- Output,
- TabInput,
-)
-from langflow.schema import Data, DataFrame
+from langflow.inputs.inputs import BoolInput, HandleInput, MessageTextInput, MultilineInput, TabInput
+from langflow.schema.data import Data
+from langflow.schema.dataframe import DataFrame
from langflow.schema.message import Message
+from langflow.template.field.base import Output
class ParserComponent(Component):
diff --git a/src/backend/base/langflow/components/processing/python_repl_core.py b/src/backend/base/langflow/components/processing/python_repl_core.py
index 053009d08..21627d85e 100644
--- a/src/backend/base/langflow/components/processing/python_repl_core.py
+++ b/src/backend/base/langflow/components/processing/python_repl_core.py
@@ -2,9 +2,9 @@ import importlib
from langchain_experimental.utilities import PythonREPL
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import CodeInput, Output, StrInput
-from langflow.schema import Data
+from langflow.schema.data import Data
class PythonREPLComponent(Component):
diff --git a/src/backend/base/langflow/components/processing/regex.py b/src/backend/base/langflow/components/processing/regex.py
index 887443e5a..49c4ccca3 100644
--- a/src/backend/base/langflow/components/processing/regex.py
+++ b/src/backend/base/langflow/components/processing/regex.py
@@ -1,8 +1,8 @@
import re
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import MessageTextInput, Output
-from langflow.schema import Data
+from langflow.schema.data import Data
from langflow.schema.message import Message
diff --git a/src/backend/base/langflow/components/processing/select_data.py b/src/backend/base/langflow/components/processing/select_data.py
index 577c1d4fc..82b839b90 100644
--- a/src/backend/base/langflow/components/processing/select_data.py
+++ b/src/backend/base/langflow/components/processing/select_data.py
@@ -1,8 +1,8 @@
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.field_typing.range_spec import RangeSpec
from langflow.inputs.inputs import DataInput, IntInput
from langflow.io import Output
-from langflow.schema import Data
+from langflow.schema.data import Data
class SelectDataComponent(Component):
diff --git a/src/backend/base/langflow/components/processing/split_text.py b/src/backend/base/langflow/components/processing/split_text.py
index 50e805a90..c07600e62 100644
--- a/src/backend/base/langflow/components/processing/split_text.py
+++ b/src/backend/base/langflow/components/processing/split_text.py
@@ -1,8 +1,9 @@
from langchain_text_splitters import CharacterTextSplitter
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import DropdownInput, HandleInput, IntInput, MessageTextInput, Output
-from langflow.schema import Data, DataFrame
+from langflow.schema.data import Data
+from langflow.schema.dataframe import DataFrame
from langflow.utils.util import unescape_string
diff --git a/src/backend/base/langflow/components/processing/structured_output.py b/src/backend/base/langflow/components/processing/structured_output.py
index 987622a5c..2c6bfb749 100644
--- a/src/backend/base/langflow/components/processing/structured_output.py
+++ b/src/backend/base/langflow/components/processing/structured_output.py
@@ -2,7 +2,7 @@ from pydantic import BaseModel, Field, create_model
from trustcall import create_extractor
from langflow.base.models.chat_result import get_chat_result
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.helpers.base_model import build_model_from_schema
from langflow.io import (
BoolInput,
diff --git a/src/backend/base/langflow/components/processing/update_data.py b/src/backend/base/langflow/components/processing/update_data.py
index 2b4c04313..38362cc93 100644
--- a/src/backend/base/langflow/components/processing/update_data.py
+++ b/src/backend/base/langflow/components/processing/update_data.py
@@ -1,6 +1,6 @@
from typing import Any
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.field_typing.range_spec import RangeSpec
from langflow.inputs.inputs import (
BoolInput,
@@ -10,7 +10,7 @@ from langflow.inputs.inputs import (
MessageTextInput,
)
from langflow.io import Output
-from langflow.schema import Data
+from langflow.schema.data import Data
from langflow.schema.dotdict import dotdict
diff --git a/src/backend/base/langflow/components/prompts/prompt.py b/src/backend/base/langflow/components/prompts/prompt.py
index ed140b483..b23c677db 100644
--- a/src/backend/base/langflow/components/prompts/prompt.py
+++ b/src/backend/base/langflow/components/prompts/prompt.py
@@ -1,5 +1,5 @@
from langflow.base.prompts.api_utils import process_prompt_template
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.inputs.inputs import DefaultPromptField
from langflow.io import MessageTextInput, Output, PromptInput
from langflow.schema.message import Message
diff --git a/src/backend/base/langflow/components/prototypes/python_function.py b/src/backend/base/langflow/components/prototypes/python_function.py
index cb415586e..d2646e31c 100644
--- a/src/backend/base/langflow/components/prototypes/python_function.py
+++ b/src/backend/base/langflow/components/prototypes/python_function.py
@@ -2,10 +2,11 @@ from collections.abc import Callable
from loguru import logger
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.custom.utils import get_function
from langflow.io import CodeInput, Output
-from langflow.schema import Data, dotdict
+from langflow.schema.data import Data
+from langflow.schema.dotdict import dotdict
from langflow.schema.message import Message
diff --git a/src/backend/base/langflow/components/retrievers/amazon_kendra.py b/src/backend/base/langflow/components/retrievers/amazon_kendra.py
index 8fdd9cc19..4f4f27190 100644
--- a/src/backend/base/langflow/components/retrievers/amazon_kendra.py
+++ b/src/backend/base/langflow/components/retrievers/amazon_kendra.py
@@ -2,7 +2,7 @@ from typing import cast
from langchain_community.retrievers import AmazonKendraRetriever
-from langflow.custom import CustomComponent
+from langflow.custom.custom_component.custom_component import CustomComponent
from langflow.field_typing import Retriever
diff --git a/src/backend/base/langflow/components/retrievers/metal.py b/src/backend/base/langflow/components/retrievers/metal.py
index 88c228f26..d546de5d5 100644
--- a/src/backend/base/langflow/components/retrievers/metal.py
+++ b/src/backend/base/langflow/components/retrievers/metal.py
@@ -3,7 +3,7 @@ from typing import cast
from langchain_community.retrievers import MetalRetriever
from metal_sdk.metal import Metal
-from langflow.custom import CustomComponent
+from langflow.custom.custom_component.custom_component import CustomComponent
from langflow.field_typing import Retriever
diff --git a/src/backend/base/langflow/components/retrievers/multi_query.py b/src/backend/base/langflow/components/retrievers/multi_query.py
index 6d381affc..d20eecb30 100644
--- a/src/backend/base/langflow/components/retrievers/multi_query.py
+++ b/src/backend/base/langflow/components/retrievers/multi_query.py
@@ -1,6 +1,6 @@
from langchain.retrievers import MultiQueryRetriever
-from langflow.custom import CustomComponent
+from langflow.custom.custom_component.custom_component import CustomComponent
from langflow.field_typing import BaseRetriever, LanguageModel, PromptTemplate, Text
diff --git a/src/backend/base/langflow/components/scrapegraph/scrapegraph_markdownify_api.py b/src/backend/base/langflow/components/scrapegraph/scrapegraph_markdownify_api.py
index 79ffff8bd..78d149735 100644
--- a/src/backend/base/langflow/components/scrapegraph/scrapegraph_markdownify_api.py
+++ b/src/backend/base/langflow/components/scrapegraph/scrapegraph_markdownify_api.py
@@ -1,10 +1,10 @@
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import (
MessageTextInput,
Output,
SecretStrInput,
)
-from langflow.schema import Data
+from langflow.schema.data import Data
class ScrapeGraphMarkdownifyApi(Component):
diff --git a/src/backend/base/langflow/components/scrapegraph/scrapegraph_search_api.py b/src/backend/base/langflow/components/scrapegraph/scrapegraph_search_api.py
index 22778ce68..a24f339df 100644
--- a/src/backend/base/langflow/components/scrapegraph/scrapegraph_search_api.py
+++ b/src/backend/base/langflow/components/scrapegraph/scrapegraph_search_api.py
@@ -1,10 +1,10 @@
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import (
MessageTextInput,
Output,
SecretStrInput,
)
-from langflow.schema import Data
+from langflow.schema.data import Data
class ScrapeGraphSearchApi(Component):
diff --git a/src/backend/base/langflow/components/scrapegraph/scrapegraph_smart_scraper_api.py b/src/backend/base/langflow/components/scrapegraph/scrapegraph_smart_scraper_api.py
index 8a33f34b4..6e249381e 100644
--- a/src/backend/base/langflow/components/scrapegraph/scrapegraph_smart_scraper_api.py
+++ b/src/backend/base/langflow/components/scrapegraph/scrapegraph_smart_scraper_api.py
@@ -1,10 +1,10 @@
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import (
MessageTextInput,
Output,
SecretStrInput,
)
-from langflow.schema import Data
+from langflow.schema.data import Data
class ScrapeGraphSmartScraperApi(Component):
diff --git a/src/backend/base/langflow/components/search/arxiv.py b/src/backend/base/langflow/components/search/arxiv.py
index 41524edf6..9ec830c94 100644
--- a/src/backend/base/langflow/components/search/arxiv.py
+++ b/src/backend/base/langflow/components/search/arxiv.py
@@ -4,9 +4,10 @@ from xml.etree.ElementTree import Element
from defusedxml.ElementTree import fromstring
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import DropdownInput, IntInput, MessageTextInput, Output
-from langflow.schema import Data, DataFrame
+from langflow.schema.data import Data
+from langflow.schema.dataframe import DataFrame
class ArXivComponent(Component):
diff --git a/src/backend/base/langflow/components/search/bing_search_api.py b/src/backend/base/langflow/components/search/bing_search_api.py
index 0b85cf5f8..1b0d751a4 100644
--- a/src/backend/base/langflow/components/search/bing_search_api.py
+++ b/src/backend/base/langflow/components/search/bing_search_api.py
@@ -5,9 +5,10 @@ from langchain_community.utilities import BingSearchAPIWrapper
from langflow.base.langchain_utilities.model import LCToolComponent
from langflow.field_typing import Tool
-from langflow.inputs import IntInput, MessageTextInput, MultilineInput, SecretStrInput
-from langflow.io import Output
-from langflow.schema import Data, DataFrame
+from langflow.inputs.inputs import IntInput, MessageTextInput, MultilineInput, SecretStrInput
+from langflow.schema.data import Data
+from langflow.schema.dataframe import DataFrame
+from langflow.template.field.base import Output
class BingSearchAPIComponent(LCToolComponent):
diff --git a/src/backend/base/langflow/components/search/duck_duck_go_search_run.py b/src/backend/base/langflow/components/search/duck_duck_go_search_run.py
index bfd7f14e3..ccd779f84 100644
--- a/src/backend/base/langflow/components/search/duck_duck_go_search_run.py
+++ b/src/backend/base/langflow/components/search/duck_duck_go_search_run.py
@@ -1,9 +1,10 @@
from langchain_community.tools import DuckDuckGoSearchRun
-from langflow.custom import Component
-from langflow.inputs import IntInput, MessageTextInput
-from langflow.io import Output
-from langflow.schema import Data, DataFrame
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import IntInput, MessageTextInput
+from langflow.schema.data import Data
+from langflow.schema.dataframe import DataFrame
+from langflow.template.field.base import Output
class DuckDuckGoSearchComponent(Component):
diff --git a/src/backend/base/langflow/components/search/exa_search.py b/src/backend/base/langflow/components/search/exa_search.py
index b0600fc26..1553ad7fc 100644
--- a/src/backend/base/langflow/components/search/exa_search.py
+++ b/src/backend/base/langflow/components/search/exa_search.py
@@ -1,7 +1,7 @@
from langchain_core.tools import tool
from metaphor_python import Metaphor
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.field_typing import Tool
from langflow.io import BoolInput, IntInput, Output, SecretStrInput
diff --git a/src/backend/base/langflow/components/search/glean_search_api.py b/src/backend/base/langflow/components/search/glean_search_api.py
index 39f27dec2..7c842ea62 100644
--- a/src/backend/base/langflow/components/search/glean_search_api.py
+++ b/src/backend/base/langflow/components/search/glean_search_api.py
@@ -9,9 +9,10 @@ from pydantic.v1 import Field
from langflow.base.langchain_utilities.model import LCToolComponent
from langflow.field_typing import Tool
-from langflow.inputs import IntInput, MultilineInput, NestedDictInput, SecretStrInput, StrInput
+from langflow.inputs.inputs import IntInput, MultilineInput, NestedDictInput, SecretStrInput, StrInput
from langflow.io import Output
-from langflow.schema import Data, DataFrame
+from langflow.schema.data import Data
+from langflow.schema.dataframe import DataFrame
class GleanSearchAPISchema(BaseModel):
diff --git a/src/backend/base/langflow/components/search/google_search_api_core.py b/src/backend/base/langflow/components/search/google_search_api_core.py
index fda4d6adf..7e553d9a2 100644
--- a/src/backend/base/langflow/components/search/google_search_api_core.py
+++ b/src/backend/base/langflow/components/search/google_search_api_core.py
@@ -1,8 +1,8 @@
from langchain_google_community import GoogleSearchAPIWrapper
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import IntInput, MultilineInput, Output, SecretStrInput
-from langflow.schema import DataFrame
+from langflow.schema.dataframe import DataFrame
class GoogleSearchAPICore(Component):
diff --git a/src/backend/base/langflow/components/search/google_serper_api_core.py b/src/backend/base/langflow/components/search/google_serper_api_core.py
index a46e19057..86bd70ca7 100644
--- a/src/backend/base/langflow/components/search/google_serper_api_core.py
+++ b/src/backend/base/langflow/components/search/google_serper_api_core.py
@@ -1,8 +1,8 @@
from langchain_community.utilities.google_serper import GoogleSerperAPIWrapper
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.io import IntInput, MultilineInput, Output, SecretStrInput
-from langflow.schema import DataFrame
+from langflow.schema.dataframe import DataFrame
from langflow.schema.message import Message
diff --git a/src/backend/base/langflow/components/search/search.py b/src/backend/base/langflow/components/search/search.py
index 5c634455e..2bdf15a0f 100644
--- a/src/backend/base/langflow/components/search/search.py
+++ b/src/backend/base/langflow/components/search/search.py
@@ -2,10 +2,11 @@ from typing import Any
from langchain_community.utilities.searchapi import SearchApiAPIWrapper
-from langflow.custom import Component
-from langflow.inputs import DictInput, DropdownInput, IntInput, MultilineInput, SecretStrInput
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import DictInput, DropdownInput, IntInput, MultilineInput, SecretStrInput
from langflow.io import Output
-from langflow.schema import Data, DataFrame
+from langflow.schema.data import Data
+from langflow.schema.dataframe import DataFrame
class SearchComponent(Component):
diff --git a/src/backend/base/langflow/components/search/serp.py b/src/backend/base/langflow/components/search/serp.py
index 51e1f981e..20ab1ca07 100644
--- a/src/backend/base/langflow/components/search/serp.py
+++ b/src/backend/base/langflow/components/search/serp.py
@@ -5,10 +5,10 @@ from langchain_core.tools import ToolException
from loguru import logger
from pydantic import BaseModel, Field
-from langflow.custom import Component
-from langflow.inputs import DictInput, IntInput, MultilineInput, SecretStrInput
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import DictInput, IntInput, MultilineInput, SecretStrInput
from langflow.io import Output
-from langflow.schema import Data
+from langflow.schema.data import Data
from langflow.schema.message import Message
diff --git a/src/backend/base/langflow/components/search/wikidata.py b/src/backend/base/langflow/components/search/wikidata.py
index 3cfed8636..734a7450a 100644
--- a/src/backend/base/langflow/components/search/wikidata.py
+++ b/src/backend/base/langflow/components/search/wikidata.py
@@ -2,9 +2,11 @@ import httpx
from httpx import HTTPError
from langchain_core.tools import ToolException
-from langflow.custom import Component
-from langflow.io import MultilineInput, Output
-from langflow.schema import Data, DataFrame
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import MultilineInput
+from langflow.schema.data import Data
+from langflow.schema.dataframe import DataFrame
+from langflow.template.field.base import Output
class WikidataComponent(Component):
diff --git a/src/backend/base/langflow/components/search/wikipedia.py b/src/backend/base/langflow/components/search/wikipedia.py
index db2032393..e72e3c724 100644
--- a/src/backend/base/langflow/components/search/wikipedia.py
+++ b/src/backend/base/langflow/components/search/wikipedia.py
@@ -1,9 +1,10 @@
from langchain_community.utilities.wikipedia import WikipediaAPIWrapper
-from langflow.custom import Component
-from langflow.inputs import BoolInput, IntInput, MessageTextInput, MultilineInput
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import BoolInput, IntInput, MessageTextInput, MultilineInput
from langflow.io import Output
-from langflow.schema import Data, DataFrame
+from langflow.schema.data import Data
+from langflow.schema.dataframe import DataFrame
class WikipediaComponent(Component):
diff --git a/src/backend/base/langflow/components/search/wolfram_alpha_api.py b/src/backend/base/langflow/components/search/wolfram_alpha_api.py
index a0b1cc7bc..34482b020 100644
--- a/src/backend/base/langflow/components/search/wolfram_alpha_api.py
+++ b/src/backend/base/langflow/components/search/wolfram_alpha_api.py
@@ -2,9 +2,10 @@ from langchain_community.utilities.wolfram_alpha import WolframAlphaAPIWrapper
from langflow.base.langchain_utilities.model import LCToolComponent
from langflow.field_typing import Tool
-from langflow.inputs import MultilineInput, SecretStrInput
+from langflow.inputs.inputs import MultilineInput, SecretStrInput
from langflow.io import Output
-from langflow.schema import Data, DataFrame
+from langflow.schema.data import Data
+from langflow.schema.dataframe import DataFrame
class WolframAlphaAPIComponent(LCToolComponent):
diff --git a/src/backend/base/langflow/components/search/yahoo.py b/src/backend/base/langflow/components/search/yahoo.py
index d391b13b6..09824ca1f 100644
--- a/src/backend/base/langflow/components/search/yahoo.py
+++ b/src/backend/base/langflow/components/search/yahoo.py
@@ -7,10 +7,11 @@ from langchain_core.tools import ToolException
from loguru import logger
from pydantic import BaseModel, Field
-from langflow.custom import Component
-from langflow.inputs import DropdownInput, IntInput, MessageTextInput
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import DropdownInput, IntInput, MessageTextInput
from langflow.io import Output
-from langflow.schema import Data, DataFrame
+from langflow.schema.data import Data
+from langflow.schema.dataframe import DataFrame
class YahooFinanceMethod(Enum):
diff --git a/src/backend/base/langflow/components/tavily/tavily_search.py b/src/backend/base/langflow/components/tavily/tavily_search.py
index a20c61b1e..4ffe00110 100644
--- a/src/backend/base/langflow/components/tavily/tavily_search.py
+++ b/src/backend/base/langflow/components/tavily/tavily_search.py
@@ -1,10 +1,11 @@
import httpx
from loguru import logger
-from langflow.custom import Component
-from langflow.io import BoolInput, DropdownInput, IntInput, MessageTextInput, Output, SecretStrInput
-from langflow.schema import Data
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import BoolInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput
+from langflow.schema.data import Data
from langflow.schema.dataframe import DataFrame
+from langflow.template.field.base import Output
class TavilySearchComponent(Component):
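
Hunks like this one (and the Bing, DuckDuckGo, and Wikidata ones above) swap Output from langflow.io over to langflow.template.field.base without changing any code that uses it, which suggests the public name is simply a re-export of the same class. An illustrative sanity check under that assumption, run in an environment where langflow-base is importable:

    from langflow.io import Output as PublicOutput
    from langflow.template.field.base import Output as CanonicalOutput

    # If langflow.io only re-exports the class defined in template.field.base,
    # both names are bound to the same object and the rewrite cannot change
    # behavior, only the import path.
    assert PublicOutput is CanonicalOutput
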
diff --git a/src/backend/base/langflow/components/tools/calculator.py b/src/backend/base/langflow/components/tools/calculator.py
index 1d13ed9f0..eecf81322 100644
--- a/src/backend/base/langflow/components/tools/calculator.py
+++ b/src/backend/base/langflow/components/tools/calculator.py
@@ -8,8 +8,8 @@ from pydantic import BaseModel, Field
from langflow.base.langchain_utilities.model import LCToolComponent
from langflow.field_typing import Tool
-from langflow.inputs import MessageTextInput
-from langflow.schema import Data
+from langflow.inputs.inputs import MessageTextInput
+from langflow.schema.data import Data
class CalculatorToolComponent(LCToolComponent):
diff --git a/src/backend/base/langflow/components/tools/google_search_api.py b/src/backend/base/langflow/components/tools/google_search_api.py
index 2ede883af..267d3305a 100644
--- a/src/backend/base/langflow/components/tools/google_search_api.py
+++ b/src/backend/base/langflow/components/tools/google_search_api.py
@@ -1,8 +1,8 @@
from langchain_core.tools import Tool
from langflow.base.langchain_utilities.model import LCToolComponent
-from langflow.inputs import IntInput, MultilineInput, SecretStrInput
-from langflow.schema import Data
+from langflow.inputs.inputs import IntInput, MultilineInput, SecretStrInput
+from langflow.schema.data import Data
class GoogleSearchAPIComponent(LCToolComponent):
diff --git a/src/backend/base/langflow/components/tools/google_serper_api.py b/src/backend/base/langflow/components/tools/google_serper_api.py
index 0b44c4db3..e78d58cfb 100644
--- a/src/backend/base/langflow/components/tools/google_serper_api.py
+++ b/src/backend/base/langflow/components/tools/google_serper_api.py
@@ -6,14 +6,14 @@ from pydantic import BaseModel, Field
from langflow.base.langchain_utilities.model import LCToolComponent
from langflow.field_typing import Tool
-from langflow.inputs import (
+from langflow.inputs.inputs import (
DictInput,
DropdownInput,
IntInput,
MultilineInput,
SecretStrInput,
)
-from langflow.schema import Data
+from langflow.schema.data import Data
class QuerySchema(BaseModel):
diff --git a/src/backend/base/langflow/components/tools/python_code_structured_tool.py b/src/backend/base/langflow/components/tools/python_code_structured_tool.py
index a37619a69..deded3269 100644
--- a/src/backend/base/langflow/components/tools/python_code_structured_tool.py
+++ b/src/backend/base/langflow/components/tools/python_code_structured_tool.py
@@ -19,7 +19,7 @@ from langflow.inputs.inputs import (
MultilineInput,
)
from langflow.io import Output
-from langflow.schema import Data
+from langflow.schema.data import Data
from langflow.schema.dotdict import dotdict
diff --git a/src/backend/base/langflow/components/tools/python_repl.py b/src/backend/base/langflow/components/tools/python_repl.py
index 0f453e41d..b60ccb9d7 100644
--- a/src/backend/base/langflow/components/tools/python_repl.py
+++ b/src/backend/base/langflow/components/tools/python_repl.py
@@ -8,8 +8,8 @@ from pydantic import BaseModel, Field
from langflow.base.langchain_utilities.model import LCToolComponent
from langflow.field_typing import Tool
-from langflow.inputs import StrInput
-from langflow.schema import Data
+from langflow.inputs.inputs import StrInput
+from langflow.schema.data import Data
class PythonREPLToolComponent(LCToolComponent):
diff --git a/src/backend/base/langflow/components/tools/search_api.py b/src/backend/base/langflow/components/tools/search_api.py
index 80f8cc61c..46fe3e925 100644
--- a/src/backend/base/langflow/components/tools/search_api.py
+++ b/src/backend/base/langflow/components/tools/search_api.py
@@ -6,8 +6,8 @@ from pydantic import BaseModel, Field
from langflow.base.langchain_utilities.model import LCToolComponent
from langflow.field_typing import Tool
-from langflow.inputs import DictInput, IntInput, MessageTextInput, MultilineInput, SecretStrInput
-from langflow.schema import Data
+from langflow.inputs.inputs import DictInput, IntInput, MessageTextInput, MultilineInput, SecretStrInput
+from langflow.schema.data import Data
class SearchAPIComponent(LCToolComponent):
diff --git a/src/backend/base/langflow/components/tools/searxng.py b/src/backend/base/langflow/components/tools/searxng.py
index b87e76229..8ad7f99f7 100644
--- a/src/backend/base/langflow/components/tools/searxng.py
+++ b/src/backend/base/langflow/components/tools/searxng.py
@@ -9,7 +9,7 @@ from loguru import logger
from pydantic.v1 import Field, create_model
from langflow.base.langchain_utilities.model import LCToolComponent
-from langflow.inputs import DropdownInput, IntInput, MessageTextInput, MultiselectInput
+from langflow.inputs.inputs import DropdownInput, IntInput, MessageTextInput, MultiselectInput
from langflow.io import Output
from langflow.schema.dotdict import dotdict
diff --git a/src/backend/base/langflow/components/tools/serp_api.py b/src/backend/base/langflow/components/tools/serp_api.py
index ba9913c57..920347b75 100644
--- a/src/backend/base/langflow/components/tools/serp_api.py
+++ b/src/backend/base/langflow/components/tools/serp_api.py
@@ -8,8 +8,8 @@ from pydantic import BaseModel, Field
from langflow.base.langchain_utilities.model import LCToolComponent
from langflow.field_typing import Tool
-from langflow.inputs import DictInput, IntInput, MultilineInput, SecretStrInput
-from langflow.schema import Data
+from langflow.inputs.inputs import DictInput, IntInput, MultilineInput, SecretStrInput
+from langflow.schema.data import Data
class SerpAPISchema(BaseModel):
diff --git a/src/backend/base/langflow/components/tools/tavily_search_tool.py b/src/backend/base/langflow/components/tools/tavily_search_tool.py
index 2515e2c9e..cdbd53c96 100644
--- a/src/backend/base/langflow/components/tools/tavily_search_tool.py
+++ b/src/backend/base/langflow/components/tools/tavily_search_tool.py
@@ -8,8 +8,8 @@ from pydantic import BaseModel, Field
from langflow.base.langchain_utilities.model import LCToolComponent
from langflow.field_typing import Tool
-from langflow.inputs import BoolInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput
-from langflow.schema import Data
+from langflow.inputs.inputs import BoolInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput
+from langflow.schema.data import Data
# Add at the top with other constants
MAX_CHUNKS_PER_SOURCE = 3
diff --git a/src/backend/base/langflow/components/tools/wikidata_api.py b/src/backend/base/langflow/components/tools/wikidata_api.py
index eb9d0a286..755784e44 100644
--- a/src/backend/base/langflow/components/tools/wikidata_api.py
+++ b/src/backend/base/langflow/components/tools/wikidata_api.py
@@ -6,8 +6,8 @@ from pydantic import BaseModel, Field
from langflow.base.langchain_utilities.model import LCToolComponent
from langflow.field_typing import Tool
-from langflow.inputs import MultilineInput
-from langflow.schema import Data
+from langflow.inputs.inputs import MultilineInput
+from langflow.schema.data import Data
class WikidataSearchSchema(BaseModel):
diff --git a/src/backend/base/langflow/components/tools/wikipedia_api.py b/src/backend/base/langflow/components/tools/wikipedia_api.py
index 06ab7478d..0608bbcd1 100644
--- a/src/backend/base/langflow/components/tools/wikipedia_api.py
+++ b/src/backend/base/langflow/components/tools/wikipedia_api.py
@@ -5,8 +5,8 @@ from langchain_community.utilities.wikipedia import WikipediaAPIWrapper
from langflow.base.langchain_utilities.model import LCToolComponent
from langflow.field_typing import Tool
-from langflow.inputs import BoolInput, IntInput, MessageTextInput, MultilineInput
-from langflow.schema import Data
+from langflow.inputs.inputs import BoolInput, IntInput, MessageTextInput, MultilineInput
+from langflow.schema.data import Data
class WikipediaAPIComponent(LCToolComponent):
diff --git a/src/backend/base/langflow/components/tools/yahoo_finance.py b/src/backend/base/langflow/components/tools/yahoo_finance.py
index 7f7e4a09e..0fca70e94 100644
--- a/src/backend/base/langflow/components/tools/yahoo_finance.py
+++ b/src/backend/base/langflow/components/tools/yahoo_finance.py
@@ -10,8 +10,8 @@ from pydantic import BaseModel, Field
from langflow.base.langchain_utilities.model import LCToolComponent
from langflow.field_typing import Tool
-from langflow.inputs import DropdownInput, IntInput, MessageTextInput
-from langflow.schema import Data
+from langflow.inputs.inputs import DropdownInput, IntInput, MessageTextInput
+from langflow.schema.data import Data
class YahooFinanceMethod(Enum):
diff --git a/src/backend/base/langflow/components/unstructured/unstructured.py b/src/backend/base/langflow/components/unstructured/unstructured.py
index f4946fc2e..d82828dd3 100644
--- a/src/backend/base/langflow/components/unstructured/unstructured.py
+++ b/src/backend/base/langflow/components/unstructured/unstructured.py
@@ -1,8 +1,8 @@
from langchain_unstructured import UnstructuredLoader
-from langflow.base.data import BaseFileComponent
-from langflow.inputs import DropdownInput, MessageTextInput, NestedDictInput, SecretStrInput
-from langflow.schema import Data
+from langflow.base.data.base_file import BaseFileComponent
+from langflow.inputs.inputs import DropdownInput, MessageTextInput, NestedDictInput, SecretStrInput
+from langflow.schema.data import Data
class UnstructuredComponent(BaseFileComponent):
diff --git a/src/backend/base/langflow/components/vectorstores/astradb.py b/src/backend/base/langflow/components/vectorstores/astradb.py
index b9d782c66..386bb861f 100644
--- a/src/backend/base/langflow/components/vectorstores/astradb.py
+++ b/src/backend/base/langflow/components/vectorstores/astradb.py
@@ -10,8 +10,8 @@ from langchain_astradb.utils.astradb import HybridSearchMode, _AstraDBCollection
from langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store
from langflow.base.vectorstores.vector_store_connection_decorator import vector_store_connection
-from langflow.helpers import docs_to_data
-from langflow.inputs import FloatInput, NestedDictInput
+from langflow.helpers.data import docs_to_data
+from langflow.inputs.inputs import FloatInput, NestedDictInput
from langflow.io import (
BoolInput,
DropdownInput,
@@ -21,7 +21,7 @@ from langflow.io import (
SecretStrInput,
StrInput,
)
-from langflow.schema import Data
+from langflow.schema.data import Data
from langflow.utils.version import get_version_info
diff --git a/src/backend/base/langflow/components/vectorstores/astradb_graph.py b/src/backend/base/langflow/components/vectorstores/astradb_graph.py
index 0d027e652..30cece80f 100644
--- a/src/backend/base/langflow/components/vectorstores/astradb_graph.py
+++ b/src/backend/base/langflow/components/vectorstores/astradb_graph.py
@@ -4,8 +4,8 @@ import orjson
from astrapy.admin import parse_api_endpoint
from langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store
-from langflow.helpers import docs_to_data
-from langflow.inputs import (
+from langflow.helpers.data import docs_to_data
+from langflow.inputs.inputs import (
BoolInput,
DictInput,
DropdownInput,
@@ -15,7 +15,7 @@ from langflow.inputs import (
SecretStrInput,
StrInput,
)
-from langflow.schema import Data
+from langflow.schema.data import Data
class AstraDBGraphVectorStoreComponent(LCVectorStoreComponent):
diff --git a/src/backend/base/langflow/components/vectorstores/cassandra.py b/src/backend/base/langflow/components/vectorstores/cassandra.py
index 6414432d5..4dc86b7f6 100644
--- a/src/backend/base/langflow/components/vectorstores/cassandra.py
+++ b/src/backend/base/langflow/components/vectorstores/cassandra.py
@@ -2,7 +2,7 @@ from langchain_community.vectorstores import Cassandra
from langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store
from langflow.helpers.data import docs_to_data
-from langflow.inputs import BoolInput, DictInput, FloatInput
+from langflow.inputs.inputs import BoolInput, DictInput, FloatInput
from langflow.io import (
DropdownInput,
HandleInput,
@@ -10,7 +10,7 @@ from langflow.io import (
MessageTextInput,
SecretStrInput,
)
-from langflow.schema import Data
+from langflow.schema.data import Data
class CassandraVectorStoreComponent(LCVectorStoreComponent):
diff --git a/src/backend/base/langflow/components/vectorstores/cassandra_graph.py b/src/backend/base/langflow/components/vectorstores/cassandra_graph.py
index 418ae3261..6183c1835 100644
--- a/src/backend/base/langflow/components/vectorstores/cassandra_graph.py
+++ b/src/backend/base/langflow/components/vectorstores/cassandra_graph.py
@@ -4,7 +4,7 @@ from langchain_community.graph_vectorstores import CassandraGraphVectorStore
from langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store
from langflow.helpers.data import docs_to_data
-from langflow.inputs import DictInput, FloatInput
+from langflow.inputs.inputs import DictInput, FloatInput
from langflow.io import (
DropdownInput,
HandleInput,
@@ -12,7 +12,7 @@ from langflow.io import (
MessageTextInput,
SecretStrInput,
)
-from langflow.schema import Data
+from langflow.schema.data import Data
class CassandraGraphVectorStoreComponent(LCVectorStoreComponent):
diff --git a/src/backend/base/langflow/components/vectorstores/chroma.py b/src/backend/base/langflow/components/vectorstores/chroma.py
index e4f5711a0..31cc1fb85 100644
--- a/src/backend/base/langflow/components/vectorstores/chroma.py
+++ b/src/backend/base/langflow/components/vectorstores/chroma.py
@@ -1,4 +1,5 @@
from copy import deepcopy
+from typing import TYPE_CHECKING
from chromadb.config import Settings
from langchain_chroma import Chroma
@@ -6,8 +7,11 @@ from typing_extensions import override
from langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store
from langflow.base.vectorstores.utils import chroma_collection_to_data
-from langflow.io import BoolInput, DropdownInput, HandleInput, IntInput, StrInput
-from langflow.schema import Data, DataFrame
+from langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, IntInput, StrInput
+from langflow.schema.data import Data
+
+if TYPE_CHECKING:
+ from langflow.schema.dataframe import DataFrame
class ChromaVectorStoreComponent(LCVectorStoreComponent):
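
Besides the fully qualified paths, the Chroma hunk above (and the local_db and vectara hunks further down) moves the DataFrame import under typing.TYPE_CHECKING, so it is resolved only by type checkers and never executed at import time. A small sketch of the idiom; the as_dataframe helper is hypothetical and assumes, as the search components in this patch do, that DataFrame accepts a list of Data:

    from typing import TYPE_CHECKING

    from langflow.schema.data import Data

    if TYPE_CHECKING:
        # Seen only by type checkers; at runtime this module stays out of
        # langflow.schema.dataframe's import graph entirely.
        from langflow.schema.dataframe import DataFrame


    def as_dataframe(rows: list[Data]) -> "DataFrame":
        # The quoted annotation defers resolution; the runtime import happens
        # only when the function is actually called.
        from langflow.schema.dataframe import DataFrame

        return DataFrame(rows)
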
diff --git a/src/backend/base/langflow/components/vectorstores/clickhouse.py b/src/backend/base/langflow/components/vectorstores/clickhouse.py
index 42c43e700..bda2e43fc 100644
--- a/src/backend/base/langflow/components/vectorstores/clickhouse.py
+++ b/src/backend/base/langflow/components/vectorstores/clickhouse.py
@@ -2,7 +2,7 @@ from langchain_community.vectorstores import Clickhouse, ClickhouseSettings
from langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store
from langflow.helpers.data import docs_to_data
-from langflow.inputs import BoolInput, FloatInput
+from langflow.inputs.inputs import BoolInput, FloatInput
from langflow.io import (
DictInput,
DropdownInput,
@@ -11,7 +11,7 @@ from langflow.io import (
SecretStrInput,
StrInput,
)
-from langflow.schema import Data
+from langflow.schema.data import Data
class ClickhouseVectorStoreComponent(LCVectorStoreComponent):
diff --git a/src/backend/base/langflow/components/vectorstores/couchbase.py b/src/backend/base/langflow/components/vectorstores/couchbase.py
index c831dcd70..187f00e8e 100644
--- a/src/backend/base/langflow/components/vectorstores/couchbase.py
+++ b/src/backend/base/langflow/components/vectorstores/couchbase.py
@@ -5,7 +5,7 @@ from langchain_community.vectorstores import CouchbaseVectorStore
from langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store
from langflow.helpers.data import docs_to_data
from langflow.io import HandleInput, IntInput, SecretStrInput, StrInput
-from langflow.schema import Data
+from langflow.schema.data import Data
class CouchbaseVectorStoreComponent(LCVectorStoreComponent):
diff --git a/src/backend/base/langflow/components/vectorstores/elasticsearch.py b/src/backend/base/langflow/components/vectorstores/elasticsearch.py
index 85e13fd23..3b815cb37 100644
--- a/src/backend/base/langflow/components/vectorstores/elasticsearch.py
+++ b/src/backend/base/langflow/components/vectorstores/elasticsearch.py
@@ -12,7 +12,7 @@ from langflow.io import (
SecretStrInput,
StrInput,
)
-from langflow.schema import Data
+from langflow.schema.data import Data
class ElasticsearchVectorStoreComponent(LCVectorStoreComponent):
diff --git a/src/backend/base/langflow/components/vectorstores/faiss.py b/src/backend/base/langflow/components/vectorstores/faiss.py
index 6999413e3..0d8c19105 100644
--- a/src/backend/base/langflow/components/vectorstores/faiss.py
+++ b/src/backend/base/langflow/components/vectorstores/faiss.py
@@ -5,7 +5,7 @@ from langchain_community.vectorstores import FAISS
from langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store
from langflow.helpers.data import docs_to_data
from langflow.io import BoolInput, HandleInput, IntInput, StrInput
-from langflow.schema import Data
+from langflow.schema.data import Data
class FaissVectorStoreComponent(LCVectorStoreComponent):
diff --git a/src/backend/base/langflow/components/vectorstores/graph_rag.py b/src/backend/base/langflow/components/vectorstores/graph_rag.py
index 2fdcf24c4..36ae90d3d 100644
--- a/src/backend/base/langflow/components/vectorstores/graph_rag.py
+++ b/src/backend/base/langflow/components/vectorstores/graph_rag.py
@@ -5,9 +5,9 @@ import graph_retriever.strategies as strategies_module
from langchain_graph_retriever import GraphRetriever
from langflow.base.vectorstores.model import LCVectorStoreComponent
-from langflow.helpers import docs_to_data
-from langflow.inputs import DropdownInput, HandleInput, MultilineInput, NestedDictInput, StrInput
-from langflow.schema import Data
+from langflow.helpers.data import docs_to_data
+from langflow.inputs.inputs import DropdownInput, HandleInput, MultilineInput, NestedDictInput, StrInput
+from langflow.schema.data import Data
def traversal_strategies() -> list[str]:
diff --git a/src/backend/base/langflow/components/vectorstores/hcd.py b/src/backend/base/langflow/components/vectorstores/hcd.py
index bbfad73cb..2c6a9b2bf 100644
--- a/src/backend/base/langflow/components/vectorstores/hcd.py
+++ b/src/backend/base/langflow/components/vectorstores/hcd.py
@@ -1,6 +1,6 @@
from langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store
-from langflow.helpers import docs_to_data
-from langflow.inputs import DictInput, FloatInput
+from langflow.helpers.data import docs_to_data
+from langflow.inputs.inputs import DictInput, FloatInput
from langflow.io import (
BoolInput,
DropdownInput,
@@ -10,7 +10,7 @@ from langflow.io import (
SecretStrInput,
StrInput,
)
-from langflow.schema import Data
+from langflow.schema.data import Data
class HCDVectorStoreComponent(LCVectorStoreComponent):
diff --git a/src/backend/base/langflow/components/vectorstores/local_db.py b/src/backend/base/langflow/components/vectorstores/local_db.py
index 07a5d6773..2c82f8d5c 100644
--- a/src/backend/base/langflow/components/vectorstores/local_db.py
+++ b/src/backend/base/langflow/components/vectorstores/local_db.py
@@ -1,5 +1,6 @@
from copy import deepcopy
from pathlib import Path
+from typing import TYPE_CHECKING
from langchain_chroma import Chroma
from loguru import logger
@@ -9,9 +10,12 @@ from langflow.base.vectorstores.model import LCVectorStoreComponent, check_cache
from langflow.base.vectorstores.utils import chroma_collection_to_data
from langflow.inputs.inputs import MultilineInput
from langflow.io import BoolInput, DropdownInput, HandleInput, IntInput, MessageTextInput, TabInput
-from langflow.schema import Data, DataFrame
+from langflow.schema.data import Data
from langflow.template.field.base import Output
+if TYPE_CHECKING:
+ from langflow.schema.dataframe import DataFrame
+
class LocalDBComponent(LCVectorStoreComponent):
"""Chroma Vector Store with search capabilities."""
diff --git a/src/backend/base/langflow/components/vectorstores/milvus.py b/src/backend/base/langflow/components/vectorstores/milvus.py
index 054fbe1db..43b0334d1 100644
--- a/src/backend/base/langflow/components/vectorstores/milvus.py
+++ b/src/backend/base/langflow/components/vectorstores/milvus.py
@@ -10,7 +10,7 @@ from langflow.io import (
SecretStrInput,
StrInput,
)
-from langflow.schema import Data
+from langflow.schema.data import Data
class MilvusVectorStoreComponent(LCVectorStoreComponent):
diff --git a/src/backend/base/langflow/components/vectorstores/mongodb_atlas.py b/src/backend/base/langflow/components/vectorstores/mongodb_atlas.py
index d71a8ddb8..d54a40b51 100644
--- a/src/backend/base/langflow/components/vectorstores/mongodb_atlas.py
+++ b/src/backend/base/langflow/components/vectorstores/mongodb_atlas.py
@@ -9,7 +9,7 @@ from pymongo.operations import SearchIndexModel
from langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store
from langflow.helpers.data import docs_to_data
from langflow.io import BoolInput, DropdownInput, HandleInput, IntInput, SecretStrInput, StrInput
-from langflow.schema import Data
+from langflow.schema.data import Data
class MongoVectorStoreComponent(LCVectorStoreComponent):
diff --git a/src/backend/base/langflow/components/vectorstores/opensearch.py b/src/backend/base/langflow/components/vectorstores/opensearch.py
index c3962a20d..c0daac174 100644
--- a/src/backend/base/langflow/components/vectorstores/opensearch.py
+++ b/src/backend/base/langflow/components/vectorstores/opensearch.py
@@ -15,7 +15,7 @@ from langflow.io import (
SecretStrInput,
StrInput,
)
-from langflow.schema import Data
+from langflow.schema.data import Data
@vector_store_connection
diff --git a/src/backend/base/langflow/components/vectorstores/pgvector.py b/src/backend/base/langflow/components/vectorstores/pgvector.py
index fef611393..ebedd699c 100644
--- a/src/backend/base/langflow/components/vectorstores/pgvector.py
+++ b/src/backend/base/langflow/components/vectorstores/pgvector.py
@@ -3,7 +3,7 @@ from langchain_community.vectorstores import PGVector
from langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store
from langflow.helpers.data import docs_to_data
from langflow.io import HandleInput, IntInput, SecretStrInput, StrInput
-from langflow.schema import Data
+from langflow.schema.data import Data
from langflow.utils.connection_string_parser import transform_connection_string
diff --git a/src/backend/base/langflow/components/vectorstores/pinecone.py b/src/backend/base/langflow/components/vectorstores/pinecone.py
index 43488a112..730c4e99e 100644
--- a/src/backend/base/langflow/components/vectorstores/pinecone.py
+++ b/src/backend/base/langflow/components/vectorstores/pinecone.py
@@ -4,7 +4,7 @@ from langchain_core.vectorstores import VectorStore
from langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store
from langflow.helpers.data import docs_to_data
from langflow.io import DropdownInput, HandleInput, IntInput, SecretStrInput, StrInput
-from langflow.schema import Data
+from langflow.schema.data import Data
class PineconeVectorStoreComponent(LCVectorStoreComponent):
diff --git a/src/backend/base/langflow/components/vectorstores/qdrant.py b/src/backend/base/langflow/components/vectorstores/qdrant.py
index 1bd50c836..e2f2fca66 100644
--- a/src/backend/base/langflow/components/vectorstores/qdrant.py
+++ b/src/backend/base/langflow/components/vectorstores/qdrant.py
@@ -10,7 +10,7 @@ from langflow.io import (
SecretStrInput,
StrInput,
)
-from langflow.schema import Data
+from langflow.schema.data import Data
class QdrantVectorStoreComponent(LCVectorStoreComponent):
diff --git a/src/backend/base/langflow/components/vectorstores/redis.py b/src/backend/base/langflow/components/vectorstores/redis.py
index dc78c8b00..c8c5b6ff8 100644
--- a/src/backend/base/langflow/components/vectorstores/redis.py
+++ b/src/backend/base/langflow/components/vectorstores/redis.py
@@ -6,7 +6,7 @@ from langchain_community.vectorstores.redis import Redis
from langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store
from langflow.helpers.data import docs_to_data
from langflow.io import HandleInput, IntInput, SecretStrInput, StrInput
-from langflow.schema import Data
+from langflow.schema.data import Data
class RedisVectorStoreComponent(LCVectorStoreComponent):
diff --git a/src/backend/base/langflow/components/vectorstores/supabase.py b/src/backend/base/langflow/components/vectorstores/supabase.py
index 1decae938..0ce65041d 100644
--- a/src/backend/base/langflow/components/vectorstores/supabase.py
+++ b/src/backend/base/langflow/components/vectorstores/supabase.py
@@ -4,7 +4,7 @@ from supabase.client import Client, create_client
from langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store
from langflow.helpers.data import docs_to_data
from langflow.io import HandleInput, IntInput, SecretStrInput, StrInput
-from langflow.schema import Data
+from langflow.schema.data import Data
class SupabaseVectorStoreComponent(LCVectorStoreComponent):
diff --git a/src/backend/base/langflow/components/vectorstores/upstash.py b/src/backend/base/langflow/components/vectorstores/upstash.py
index d1a80de19..3dfee691a 100644
--- a/src/backend/base/langflow/components/vectorstores/upstash.py
+++ b/src/backend/base/langflow/components/vectorstores/upstash.py
@@ -9,7 +9,7 @@ from langflow.io import (
SecretStrInput,
StrInput,
)
-from langflow.schema import Data
+from langflow.schema.data import Data
class UpstashVectorStoreComponent(LCVectorStoreComponent):
diff --git a/src/backend/base/langflow/components/vectorstores/vectara.py b/src/backend/base/langflow/components/vectorstores/vectara.py
index b058ba2f0..d517e607f 100644
--- a/src/backend/base/langflow/components/vectorstores/vectara.py
+++ b/src/backend/base/langflow/components/vectorstores/vectara.py
@@ -5,11 +5,13 @@ from langchain_community.vectorstores import Vectara
from langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store
from langflow.helpers.data import docs_to_data
from langflow.io import HandleInput, IntInput, SecretStrInput, StrInput
-from langflow.schema import Data, DataFrame
+from langflow.schema.data import Data
if TYPE_CHECKING:
from langchain_community.vectorstores import Vectara
+ from langflow.schema.dataframe import DataFrame
+
class VectaraVectorStoreComponent(LCVectorStoreComponent):
"""Vectara Vector Store with search capabilities."""
diff --git a/src/backend/base/langflow/components/vectorstores/vectara_rag.py b/src/backend/base/langflow/components/vectorstores/vectara_rag.py
index fda11a209..e37c5c588 100644
--- a/src/backend/base/langflow/components/vectorstores/vectara_rag.py
+++ b/src/backend/base/langflow/components/vectorstores/vectara_rag.py
@@ -1,4 +1,4 @@
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.field_typing.range_spec import RangeSpec
from langflow.io import DropdownInput, FloatInput, IntInput, MessageTextInput, Output, SecretStrInput, StrInput
from langflow.schema.message import Message
diff --git a/src/backend/base/langflow/components/vectorstores/vectara_self_query.py b/src/backend/base/langflow/components/vectorstores/vectara_self_query.py
index 62d2cfcf7..433f99ce6 100644
--- a/src/backend/base/langflow/components/vectorstores/vectara_self_query.py
+++ b/src/backend/base/langflow/components/vectorstores/vectara_self_query.py
@@ -5,7 +5,7 @@ from langchain.chains.query_constructor.base import AttributeInfo
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain_core.vectorstores import VectorStore
-from langflow.custom import CustomComponent
+from langflow.custom.custom_component.custom_component import CustomComponent
from langflow.field_typing import Retriever
from langflow.field_typing.constants import LanguageModel
diff --git a/src/backend/base/langflow/components/vectorstores/weaviate.py b/src/backend/base/langflow/components/vectorstores/weaviate.py
index e6418ce7b..964dd170d 100644
--- a/src/backend/base/langflow/components/vectorstores/weaviate.py
+++ b/src/backend/base/langflow/components/vectorstores/weaviate.py
@@ -4,7 +4,7 @@ from langchain_community.vectorstores import Weaviate
from langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store
from langflow.helpers.data import docs_to_data
from langflow.io import BoolInput, HandleInput, IntInput, SecretStrInput, StrInput
-from langflow.schema import Data
+from langflow.schema.data import Data
class WeaviateVectorStoreComponent(LCVectorStoreComponent):
diff --git a/src/backend/base/langflow/components/youtube/channel.py b/src/backend/base/langflow/components/youtube/channel.py
index 62a6e5bcd..2f778a7c5 100644
--- a/src/backend/base/langflow/components/youtube/channel.py
+++ b/src/backend/base/langflow/components/youtube/channel.py
@@ -5,10 +5,10 @@ import pandas as pd
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
-from langflow.custom import Component
-from langflow.inputs import BoolInput, MessageTextInput, SecretStrInput
-from langflow.schema import DataFrame
-from langflow.template import Output
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import BoolInput, MessageTextInput, SecretStrInput
+from langflow.schema.dataframe import DataFrame
+from langflow.template.field.base import Output
class YouTubeChannelComponent(Component):
diff --git a/src/backend/base/langflow/components/youtube/comments.py b/src/backend/base/langflow/components/youtube/comments.py
index 05fccce56..71e1f7362 100644
--- a/src/backend/base/langflow/components/youtube/comments.py
+++ b/src/backend/base/langflow/components/youtube/comments.py
@@ -4,10 +4,10 @@ import pandas as pd
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
-from langflow.custom import Component
-from langflow.inputs import BoolInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput
-from langflow.schema import DataFrame
-from langflow.template import Output
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import BoolInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput
+from langflow.schema.dataframe import DataFrame
+from langflow.template.field.base import Output
class YouTubeCommentsComponent(Component):
diff --git a/src/backend/base/langflow/components/youtube/playlist.py b/src/backend/base/langflow/components/youtube/playlist.py
index d81d657ea..9d0866766 100644
--- a/src/backend/base/langflow/components/youtube/playlist.py
+++ b/src/backend/base/langflow/components/youtube/playlist.py
@@ -1,9 +1,10 @@
from pytube import Playlist # Ensure you have pytube installed
-from langflow.custom import Component
-from langflow.inputs import MessageTextInput
-from langflow.schema import Data, DataFrame
-from langflow.template import Output
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import MessageTextInput
+from langflow.schema.data import Data
+from langflow.schema.dataframe import DataFrame
+from langflow.template.field.base import Output
class YouTubePlaylistComponent(Component):
diff --git a/src/backend/base/langflow/components/youtube/search.py b/src/backend/base/langflow/components/youtube/search.py
index 1efdee7f0..8c6cf80d4 100644
--- a/src/backend/base/langflow/components/youtube/search.py
+++ b/src/backend/base/langflow/components/youtube/search.py
@@ -4,10 +4,10 @@ import pandas as pd
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
-from langflow.custom import Component
-from langflow.inputs import BoolInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput
-from langflow.schema import DataFrame
-from langflow.template import Output
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import BoolInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput
+from langflow.schema.dataframe import DataFrame
+from langflow.template.field.base import Output
class YouTubeSearchComponent(Component):
diff --git a/src/backend/base/langflow/components/youtube/trending.py b/src/backend/base/langflow/components/youtube/trending.py
index 85ad66977..b3b6c0f98 100644
--- a/src/backend/base/langflow/components/youtube/trending.py
+++ b/src/backend/base/langflow/components/youtube/trending.py
@@ -4,10 +4,10 @@ import pandas as pd
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
-from langflow.custom import Component
-from langflow.inputs import BoolInput, DropdownInput, IntInput, SecretStrInput
-from langflow.schema import DataFrame
-from langflow.template import Output
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import BoolInput, DropdownInput, IntInput, SecretStrInput
+from langflow.schema.dataframe import DataFrame
+from langflow.template.field.base import Output
HTTP_FORBIDDEN = 403
HTTP_NOT_FOUND = 404
diff --git a/src/backend/base/langflow/components/youtube/video_details.py b/src/backend/base/langflow/components/youtube/video_details.py
index 0e8332e92..53e4f903e 100644
--- a/src/backend/base/langflow/components/youtube/video_details.py
+++ b/src/backend/base/langflow/components/youtube/video_details.py
@@ -5,10 +5,10 @@ import pandas as pd
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
-from langflow.custom import Component
-from langflow.inputs import BoolInput, MessageTextInput, SecretStrInput
-from langflow.schema import DataFrame
-from langflow.template import Output
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import BoolInput, MessageTextInput, SecretStrInput
+from langflow.schema.dataframe import DataFrame
+from langflow.template.field.base import Output
class YouTubeVideoDetailsComponent(Component):
diff --git a/src/backend/base/langflow/components/youtube/youtube_transcripts.py b/src/backend/base/langflow/components/youtube/youtube_transcripts.py
index 28ed2ed76..bb0eb92ea 100644
--- a/src/backend/base/langflow/components/youtube/youtube_transcripts.py
+++ b/src/backend/base/langflow/components/youtube/youtube_transcripts.py
@@ -3,10 +3,12 @@ import youtube_transcript_api
from langchain_community.document_loaders import YoutubeLoader
from langchain_community.document_loaders.youtube import TranscriptFormat
-from langflow.custom import Component
-from langflow.inputs import DropdownInput, IntInput, MultilineInput
-from langflow.schema import Data, DataFrame, Message
-from langflow.template import Output
+from langflow.custom.custom_component.component import Component
+from langflow.inputs.inputs import DropdownInput, IntInput, MultilineInput
+from langflow.schema.data import Data
+from langflow.schema.dataframe import DataFrame
+from langflow.schema.message import Message
+from langflow.template.field.base import Output
class YouTubeTranscriptsComponent(Component):
diff --git a/src/backend/base/langflow/custom/custom_component/base_component.py b/src/backend/base/langflow/custom/custom_component/base_component.py
index 37035f24d..fbd206103 100644
--- a/src/backend/base/langflow/custom/custom_component/base_component.py
+++ b/src/backend/base/langflow/custom/custom_component/base_component.py
@@ -8,7 +8,7 @@ from fastapi import HTTPException
from loguru import logger
from langflow.custom.attributes import ATTR_FUNC_MAPPING
-from langflow.custom.code_parser import CodeParser
+from langflow.custom.code_parser.code_parser import CodeParser
from langflow.custom.eval import eval_custom_component_code
from langflow.utils import validate
diff --git a/src/backend/base/langflow/custom/custom_component/component_with_cache.py b/src/backend/base/langflow/custom/custom_component/component_with_cache.py
index e8a6888d9..74f93528f 100644
--- a/src/backend/base/langflow/custom/custom_component/component_with_cache.py
+++ b/src/backend/base/langflow/custom/custom_component/component_with_cache.py
@@ -1,4 +1,4 @@
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.services.deps import get_shared_component_cache_service
diff --git a/src/backend/base/langflow/custom/custom_component/custom_component.py b/src/backend/base/langflow/custom/custom_component/custom_component.py
index adc85a14c..12bdb8f3f 100644
--- a/src/backend/base/langflow/custom/custom_component/custom_component.py
+++ b/src/backend/base/langflow/custom/custom_component/custom_component.py
@@ -12,7 +12,7 @@ from pydantic import BaseModel
from langflow.custom.custom_component.base_component import BaseComponent
from langflow.helpers.flow import list_flows, load_flow, run_flow
-from langflow.schema import Data
+from langflow.schema.data import Data
from langflow.services.deps import get_storage_service, get_variable_service, session_scope
from langflow.services.storage.service import StorageService
from langflow.template.utils import update_frontend_node_with_template_values
diff --git a/src/backend/base/langflow/custom/directory_reader/directory_reader.py b/src/backend/base/langflow/custom/directory_reader/directory_reader.py
index fb0388059..2f8f7169f 100644
--- a/src/backend/base/langflow/custom/directory_reader/directory_reader.py
+++ b/src/backend/base/langflow/custom/directory_reader/directory_reader.py
@@ -7,7 +7,7 @@ import anyio
from aiofile import async_open
from loguru import logger
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
MAX_DEPTH = 2
diff --git a/src/backend/base/langflow/custom/directory_reader/utils.py b/src/backend/base/langflow/custom/directory_reader/utils.py
index e9c3357ff..619baf3cc 100644
--- a/src/backend/base/langflow/custom/directory_reader/utils.py
+++ b/src/backend/base/langflow/custom/directory_reader/utils.py
@@ -2,7 +2,7 @@ import asyncio
from loguru import logger
-from langflow.custom.directory_reader import DirectoryReader
+from langflow.custom.directory_reader.directory_reader import DirectoryReader
from langflow.template.frontend_node.custom_components import CustomComponentFrontendNode
diff --git a/src/backend/base/langflow/custom/eval.py b/src/backend/base/langflow/custom/eval.py
index b163e8ef7..00d8ffb94 100644
--- a/src/backend/base/langflow/custom/eval.py
+++ b/src/backend/base/langflow/custom/eval.py
@@ -3,7 +3,7 @@ from typing import TYPE_CHECKING
from langflow.utils import validate
if TYPE_CHECKING:
- from langflow.custom import CustomComponent
+ from langflow.custom.custom_component.custom_component import CustomComponent
def eval_custom_component_code(code: str) -> type["CustomComponent"]:
diff --git a/src/backend/base/langflow/custom/utils.py b/src/backend/base/langflow/custom/utils.py
index 13a0ac0ec..c6027c71f 100644
--- a/src/backend/base/langflow/custom/utils.py
+++ b/src/backend/base/langflow/custom/utils.py
@@ -13,8 +13,8 @@ from fastapi import HTTPException
from loguru import logger
from pydantic import BaseModel
-from langflow.custom import CustomComponent
from langflow.custom.custom_component.component import Component
+from langflow.custom.custom_component.custom_component import CustomComponent
from langflow.custom.directory_reader.utils import (
abuild_custom_component_list_from_path,
build_custom_component_list_from_path,
@@ -24,7 +24,7 @@ from langflow.custom.eval import eval_custom_component_code
from langflow.custom.schema import MissingDefault
from langflow.field_typing.range_spec import RangeSpec
from langflow.helpers.custom import format_type
-from langflow.schema import dotdict
+from langflow.schema.dotdict import dotdict
from langflow.template.field.base import Input
from langflow.template.frontend_node.custom_components import ComponentFrontendNode, CustomComponentFrontendNode
from langflow.type_extraction.type_extraction import extract_inner_type
diff --git a/src/backend/base/langflow/graph/graph/base.py b/src/backend/base/langflow/graph/graph/base.py
index b64212b88..0910a6514 100644
--- a/src/backend/base/langflow/graph/graph/base.py
+++ b/src/backend/base/langflow/graph/graph/base.py
@@ -52,7 +52,7 @@ if TYPE_CHECKING:
from langflow.events.event_manager import EventManager
from langflow.graph.edge.schema import EdgeData
from langflow.graph.schema import ResultData
- from langflow.schema import Data
+ from langflow.schema.data import Data
from langflow.services.chat.schema import GetCache, SetCache
from langflow.services.tracing.service import TracingService
diff --git a/src/backend/base/langflow/graph/schema.py b/src/backend/base/langflow/graph/schema.py
index 407b30640..0ca16a689 100644
--- a/src/backend/base/langflow/graph/schema.py
+++ b/src/backend/base/langflow/graph/schema.py
@@ -4,7 +4,7 @@ from typing import Any
from pydantic import BaseModel, Field, field_serializer, model_validator
from langflow.schema.schema import OutputValue, StreamURL
-from langflow.serialization import serialize
+from langflow.serialization.serialization import serialize
from langflow.utils.schemas import ChatOutputResponse, ContainsEnumMeta
diff --git a/src/backend/base/langflow/graph/utils.py b/src/backend/base/langflow/graph/utils.py
index 77c5bd2e4..4689c116a 100644
--- a/src/backend/base/langflow/graph/utils.py
+++ b/src/backend/base/langflow/graph/utils.py
@@ -11,8 +11,8 @@ from loguru import logger
from langflow.interface.utils import extract_input_variables_from_prompt
from langflow.schema.data import Data
from langflow.schema.message import Message
-from langflow.serialization import serialize
from langflow.serialization.constants import MAX_ITEMS_LENGTH, MAX_TEXT_LENGTH
+from langflow.serialization.serialization import serialize
from langflow.services.database.models.transactions.crud import log_transaction as crud_log_transaction
from langflow.services.database.models.transactions.model import TransactionBase
from langflow.services.database.models.vertex_builds.crud import log_vertex_build as crud_log_vertex_build
diff --git a/src/backend/base/langflow/graph/vertex/base.py b/src/backend/base/langflow/graph/vertex/base.py
index 4832a71c2..b9b2ed997 100644
--- a/src/backend/base/langflow/graph/vertex/base.py
+++ b/src/backend/base/langflow/graph/vertex/base.py
@@ -27,7 +27,7 @@ from langflow.utils.util import sync_to_async
if TYPE_CHECKING:
from uuid import UUID
- from langflow.custom import Component
+ from langflow.custom.custom_component.component import Component
from langflow.events.event_manager import EventManager
from langflow.graph.edge.base import CycleEdge, Edge
from langflow.graph.graph.base import Graph
diff --git a/src/backend/base/langflow/graph/vertex/vertex_types.py b/src/backend/base/langflow/graph/vertex/vertex_types.py
index 2b0a2bb5a..3c7a26d84 100644
--- a/src/backend/base/langflow/graph/vertex/vertex_types.py
+++ b/src/backend/base/langflow/graph/vertex/vertex_types.py
@@ -13,11 +13,11 @@ from langflow.graph.schema import CHAT_COMPONENTS, RECORDS_COMPONENTS, Interface
from langflow.graph.utils import UnbuiltObject, log_vertex_build, rewrite_file_path
from langflow.graph.vertex.base import Vertex
from langflow.graph.vertex.exceptions import NoComponentInstanceError
-from langflow.schema import Data
from langflow.schema.artifact import ArtifactType
+from langflow.schema.data import Data
from langflow.schema.message import Message
from langflow.schema.schema import INPUT_FIELD_NAME
-from langflow.serialization import serialize
+from langflow.serialization.serialization import serialize
from langflow.template.field.base import UNDEFINED, Output
from langflow.utils.schemas import ChatOutputResponse, DataOutputResponse
from langflow.utils.util import unescape_string
diff --git a/src/backend/base/langflow/helpers/data.py b/src/backend/base/langflow/helpers/data.py
index 38cd6056a..e6217addf 100644
--- a/src/backend/base/langflow/helpers/data.py
+++ b/src/backend/base/langflow/helpers/data.py
@@ -6,7 +6,8 @@ import orjson
from fastapi.encoders import jsonable_encoder
from langchain_core.documents import Document
-from langflow.schema import Data, DataFrame
+from langflow.schema.data import Data
+from langflow.schema.dataframe import DataFrame
from langflow.schema.message import Message
diff --git a/src/backend/base/langflow/helpers/flow.py b/src/backend/base/langflow/helpers/flow.py
index 6846c3428..1ee81edf8 100644
--- a/src/backend/base/langflow/helpers/flow.py
+++ b/src/backend/base/langflow/helpers/flow.py
@@ -9,8 +9,7 @@ from pydantic.v1 import BaseModel, Field, create_model
from sqlmodel import select
from langflow.schema.schema import INPUT_FIELD_NAME
-from langflow.services.database.models.flow import Flow
-from langflow.services.database.models.flow.model import FlowRead
+from langflow.services.database.models.flow.model import Flow, FlowRead
from langflow.services.deps import get_settings_service, session_scope
if TYPE_CHECKING:
@@ -19,7 +18,7 @@ if TYPE_CHECKING:
from langflow.graph.graph.base import Graph
from langflow.graph.schema import RunOutputs
from langflow.graph.vertex.base import Vertex
- from langflow.schema import Data
+ from langflow.schema.data import Data
INPUT_TYPE_MAP = {
"ChatInput": {"type_hint": "Optional[str]", "default": '""'},
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompt Chaining.json b/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompt Chaining.json
index bc886b083..5cc1868eb 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompt Chaining.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompt Chaining.json
@@ -256,7 +256,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"template": {
"_input_type": "PromptInput",
@@ -432,7 +432,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
+ "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"_input_type": "FileInput",
@@ -749,7 +749,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
@@ -985,7 +985,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"template": {
"_input_type": "PromptInput",
@@ -1113,7 +1113,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"template": {
"_input_type": "PromptInput",
@@ -1367,7 +1367,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
+ "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
},
"input_value": {
"_input_type": "MessageInput",
@@ -1760,7 +1760,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
+ "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
},
"input_value": {
"_input_type": "MessageInput",
@@ -2153,7 +2153,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
+ "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
},
"input_value": {
"_input_type": "MessageInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting.json b/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting.json
index ec0de9fb9..f5f507f59 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting.json
@@ -190,7 +190,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
+ "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"advanced": true,
@@ -432,7 +432,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"template": {
"_input_type": "PromptInput",
@@ -698,7 +698,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
@@ -985,7 +985,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
+ "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
},
"input_value": {
"_input_type": "MessageInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Blog Writer.json b/src/backend/base/langflow/initial_setup/starter_projects/Blog Writer.json
index dda68f527..0f1674446 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Blog Writer.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Blog Writer.json
@@ -213,7 +213,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"instructions": {
"advanced": false,
@@ -556,7 +556,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"advanced": true,
@@ -868,7 +868,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
+ "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
},
"input_value": {
"_input_type": "MessageInput",
@@ -1212,7 +1212,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.custom import Component\nfrom langflow.helpers.data import safe_convert\nfrom langflow.io import (\n BoolInput,\n HandleInput,\n MessageTextInput,\n MultilineInput,\n Output,\n TabInput,\n)\nfrom langflow.schema import Data, DataFrame\nfrom langflow.schema.message import Message\n\n\nclass ParserComponent(Component):\n display_name = \"Parser\"\n description = \"Extracts text using a template.\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"input_data\",\n display_name=\"Data or DataFrame\",\n input_types=[\"DataFrame\", \"Data\"],\n info=\"Accepts either a DataFrame or a Data object.\",\n required=True,\n ),\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Parser\", \"Stringify\"],\n value=\"Parser\",\n info=\"Convert into raw string instead of using a template.\",\n real_time_refresh=True,\n ),\n MultilineInput(\n name=\"pattern\",\n display_name=\"Template\",\n info=(\n \"Use variables within curly brackets to extract column values for DataFrames \"\n \"or key values for Data.\"\n \"For example: `Name: {Name}, Age: {Age}, Country: {Country}`\"\n ),\n value=\"Text: {text}\", # Example default\n dynamic=True,\n show=True,\n required=True,\n ),\n MessageTextInput(\n name=\"sep\",\n display_name=\"Separator\",\n advanced=True,\n value=\"\\n\",\n info=\"String used to separate rows/items.\",\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"Parsed Text\",\n name=\"parsed_text\",\n info=\"Formatted text output.\",\n method=\"parse_combined_text\",\n ),\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n \"\"\"Dynamically hide/show `template` and enforce requirement based on `stringify`.\"\"\"\n if field_name == \"mode\":\n build_config[\"pattern\"][\"show\"] = self.mode == \"Parser\"\n build_config[\"pattern\"][\"required\"] = self.mode == \"Parser\"\n if field_value:\n clean_data = BoolInput(\n name=\"clean_data\",\n display_name=\"Clean Data\",\n info=(\n \"Enable to clean the data by removing empty rows and lines \"\n \"in each cell of the DataFrame/ Data object.\"\n ),\n value=True,\n advanced=True,\n required=False,\n )\n build_config[\"clean_data\"] = clean_data.to_dict()\n else:\n build_config.pop(\"clean_data\", None)\n\n return build_config\n\n def _clean_args(self):\n \"\"\"Prepare arguments based on input type.\"\"\"\n input_data = self.input_data\n\n match input_data:\n case list() if all(isinstance(item, Data) for item in input_data):\n msg = \"List of Data objects is not supported.\"\n raise ValueError(msg)\n case DataFrame():\n return input_data, None\n case Data():\n return None, input_data\n case dict() if \"data\" in input_data:\n try:\n if \"columns\" in input_data: # Likely a DataFrame\n return DataFrame.from_dict(input_data), None\n # Likely a Data object\n return None, Data(**input_data)\n except (TypeError, ValueError, KeyError) as e:\n msg = f\"Invalid structured input provided: {e!s}\"\n raise ValueError(msg) from e\n case _:\n msg = f\"Unsupported input type: {type(input_data)}. 
Expected DataFrame or Data.\"\n raise ValueError(msg)\n\n def parse_combined_text(self) -> Message:\n \"\"\"Parse all rows/items into a single text or convert input to string if `stringify` is enabled.\"\"\"\n # Early return for stringify option\n if self.mode == \"Stringify\":\n return self.convert_to_string()\n\n df, data = self._clean_args()\n\n lines = []\n if df is not None:\n for _, row in df.iterrows():\n formatted_text = self.pattern.format(**row.to_dict())\n lines.append(formatted_text)\n elif data is not None:\n formatted_text = self.pattern.format(**data.data)\n lines.append(formatted_text)\n\n combined_text = self.sep.join(lines)\n self.status = combined_text\n return Message(text=combined_text)\n\n def convert_to_string(self) -> Message:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n result = \"\"\n if isinstance(self.input_data, list):\n result = \"\\n\".join([safe_convert(item, clean_data=self.clean_data or False) for item in self.input_data])\n else:\n result = safe_convert(self.input_data or False)\n self.log(f\"Converted to string with length: {len(result)}\")\n\n message = Message(text=result)\n self.status = message\n return message\n"
+ "value": "from langflow.custom.custom_component.component import Component\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, HandleInput, MessageTextInput, MultilineInput, TabInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.template.field.base import Output\n\n\nclass ParserComponent(Component):\n display_name = \"Parser\"\n description = \"Extracts text using a template.\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"input_data\",\n display_name=\"Data or DataFrame\",\n input_types=[\"DataFrame\", \"Data\"],\n info=\"Accepts either a DataFrame or a Data object.\",\n required=True,\n ),\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Parser\", \"Stringify\"],\n value=\"Parser\",\n info=\"Convert into raw string instead of using a template.\",\n real_time_refresh=True,\n ),\n MultilineInput(\n name=\"pattern\",\n display_name=\"Template\",\n info=(\n \"Use variables within curly brackets to extract column values for DataFrames \"\n \"or key values for Data.\"\n \"For example: `Name: {Name}, Age: {Age}, Country: {Country}`\"\n ),\n value=\"Text: {text}\", # Example default\n dynamic=True,\n show=True,\n required=True,\n ),\n MessageTextInput(\n name=\"sep\",\n display_name=\"Separator\",\n advanced=True,\n value=\"\\n\",\n info=\"String used to separate rows/items.\",\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"Parsed Text\",\n name=\"parsed_text\",\n info=\"Formatted text output.\",\n method=\"parse_combined_text\",\n ),\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n \"\"\"Dynamically hide/show `template` and enforce requirement based on `stringify`.\"\"\"\n if field_name == \"mode\":\n build_config[\"pattern\"][\"show\"] = self.mode == \"Parser\"\n build_config[\"pattern\"][\"required\"] = self.mode == \"Parser\"\n if field_value:\n clean_data = BoolInput(\n name=\"clean_data\",\n display_name=\"Clean Data\",\n info=(\n \"Enable to clean the data by removing empty rows and lines \"\n \"in each cell of the DataFrame/ Data object.\"\n ),\n value=True,\n advanced=True,\n required=False,\n )\n build_config[\"clean_data\"] = clean_data.to_dict()\n else:\n build_config.pop(\"clean_data\", None)\n\n return build_config\n\n def _clean_args(self):\n \"\"\"Prepare arguments based on input type.\"\"\"\n input_data = self.input_data\n\n match input_data:\n case list() if all(isinstance(item, Data) for item in input_data):\n msg = \"List of Data objects is not supported.\"\n raise ValueError(msg)\n case DataFrame():\n return input_data, None\n case Data():\n return None, input_data\n case dict() if \"data\" in input_data:\n try:\n if \"columns\" in input_data: # Likely a DataFrame\n return DataFrame.from_dict(input_data), None\n # Likely a Data object\n return None, Data(**input_data)\n except (TypeError, ValueError, KeyError) as e:\n msg = f\"Invalid structured input provided: {e!s}\"\n raise ValueError(msg) from e\n case _:\n msg = f\"Unsupported input type: {type(input_data)}. 
Expected DataFrame or Data.\"\n raise ValueError(msg)\n\n def parse_combined_text(self) -> Message:\n \"\"\"Parse all rows/items into a single text or convert input to string if `stringify` is enabled.\"\"\"\n # Early return for stringify option\n if self.mode == \"Stringify\":\n return self.convert_to_string()\n\n df, data = self._clean_args()\n\n lines = []\n if df is not None:\n for _, row in df.iterrows():\n formatted_text = self.pattern.format(**row.to_dict())\n lines.append(formatted_text)\n elif data is not None:\n formatted_text = self.pattern.format(**data.data)\n lines.append(formatted_text)\n\n combined_text = self.sep.join(lines)\n self.status = combined_text\n return Message(text=combined_text)\n\n def convert_to_string(self) -> Message:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n result = \"\"\n if isinstance(self.input_data, list):\n result = \"\\n\".join([safe_convert(item, clean_data=self.clean_data or False) for item in self.input_data])\n else:\n result = safe_convert(self.input_data or False)\n self.log(f\"Converted to string with length: {len(result)}\")\n\n message = Message(text=result)\n self.status = message\n return message\n"
},
"input_data": {
"_input_type": "HandleInput",
@@ -1445,7 +1445,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "import re\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom langchain_community.document_loaders import RecursiveUrlLoader\nfrom loguru import logger\n\nfrom langflow.custom import Component\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.helpers.data import safe_convert\nfrom langflow.io import BoolInput, DropdownInput, IntInput, MessageTextInput, Output, SliderInput, TableInput\nfrom langflow.schema import DataFrame, Message\nfrom langflow.services.deps import get_settings_service\n\n# Constants\nDEFAULT_TIMEOUT = 30\nDEFAULT_MAX_DEPTH = 1\nDEFAULT_FORMAT = \"Text\"\nURL_REGEX = re.compile(\n r\"^(https?:\\/\\/)?\" r\"(www\\.)?\" r\"([a-zA-Z0-9.-]+)\" r\"(\\.[a-zA-Z]{2,})?\" r\"(:\\d+)?\" r\"(\\/[^\\s]*)?$\",\n re.IGNORECASE,\n)\n\n\nclass URLComponent(Component):\n \"\"\"A component that loads and parses content from web pages recursively.\n\n This component allows fetching content from one or more URLs, with options to:\n - Control crawl depth\n - Prevent crawling outside the root domain\n - Use async loading for better performance\n - Extract either raw HTML or clean text\n - Configure request headers and timeouts\n \"\"\"\n\n display_name = \"URL\"\n description = \"Fetch content from one or more web pages, following links recursively.\"\n icon = \"layout-template\"\n name = \"URLComponent\"\n\n inputs = [\n MessageTextInput(\n name=\"urls\",\n display_name=\"URLs\",\n info=\"Enter one or more URLs to crawl recursively, by clicking the '+' button.\",\n is_list=True,\n tool_mode=True,\n placeholder=\"Enter a URL...\",\n list_add_label=\"Add URL\",\n input_types=[],\n ),\n SliderInput(\n name=\"max_depth\",\n display_name=\"Depth\",\n info=(\n \"Controls how many 'clicks' away from the initial page the crawler will go:\\n\"\n \"- depth 1: only the initial page\\n\"\n \"- depth 2: initial page + all pages linked directly from it\\n\"\n \"- depth 3: initial page + direct links + links found on those direct link pages\\n\"\n \"Note: This is about link traversal, not URL path depth.\"\n ),\n value=DEFAULT_MAX_DEPTH,\n range_spec=RangeSpec(min=1, max=5, step=1),\n required=False,\n min_label=\" \",\n max_label=\" \",\n min_label_icon=\"None\",\n max_label_icon=\"None\",\n # slider_input=True\n ),\n BoolInput(\n name=\"prevent_outside\",\n display_name=\"Prevent Outside\",\n info=(\n \"If enabled, only crawls URLs within the same domain as the root URL. \"\n \"This helps prevent the crawler from going to external websites.\"\n ),\n value=True,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"use_async\",\n display_name=\"Use Async\",\n info=(\n \"If enabled, uses asynchronous loading which can be significantly faster \"\n \"but might use more system resources.\"\n ),\n value=True,\n required=False,\n advanced=True,\n ),\n DropdownInput(\n name=\"format\",\n display_name=\"Output Format\",\n info=\"Output Format. 
Use 'Text' to extract the text from the HTML or 'HTML' for the raw HTML content.\",\n options=[\"Text\", \"HTML\"],\n value=DEFAULT_FORMAT,\n advanced=True,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"Timeout for the request in seconds.\",\n value=DEFAULT_TIMEOUT,\n required=False,\n advanced=True,\n ),\n TableInput(\n name=\"headers\",\n display_name=\"Headers\",\n info=\"The headers to send with the request\",\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Header\",\n \"type\": \"str\",\n \"description\": \"Header name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Header value\",\n },\n ],\n value=[{\"key\": \"User-Agent\", \"value\": get_settings_service().settings.user_agent}],\n advanced=True,\n input_types=[\"DataFrame\"],\n ),\n BoolInput(\n name=\"filter_text_html\",\n display_name=\"Filter Text/HTML\",\n info=\"If enabled, filters out text/css content type from the results.\",\n value=True,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"continue_on_failure\",\n display_name=\"Continue on Failure\",\n info=\"If enabled, continues crawling even if some requests fail.\",\n value=True,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"check_response_status\",\n display_name=\"Check Response Status\",\n info=\"If enabled, checks the response status of the request.\",\n value=False,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"autoset_encoding\",\n display_name=\"Autoset Encoding\",\n info=\"If enabled, automatically sets the encoding of the request.\",\n value=True,\n required=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Result\", name=\"page_results\", method=\"fetch_content\"),\n Output(display_name=\"Raw Result\", name=\"raw_results\", method=\"as_message\"),\n ]\n\n @staticmethod\n def validate_url(url: str) -> bool:\n \"\"\"Validates if the given string matches URL pattern.\n\n Args:\n url: The URL string to validate\n\n Returns:\n bool: True if the URL is valid, False otherwise\n \"\"\"\n return bool(URL_REGEX.match(url))\n\n def ensure_url(self, url: str) -> str:\n \"\"\"Ensures the given string is a valid URL.\n\n Args:\n url: The URL string to validate and normalize\n\n Returns:\n str: The normalized URL\n\n Raises:\n ValueError: If the URL is invalid\n \"\"\"\n url = url.strip()\n if not url.startswith((\"http://\", \"https://\")):\n url = \"https://\" + url\n\n if not self.validate_url(url):\n msg = f\"Invalid URL: {url}\"\n raise ValueError(msg)\n\n return url\n\n def _create_loader(self, url: str) -> RecursiveUrlLoader:\n \"\"\"Creates a RecursiveUrlLoader instance with the configured settings.\n\n Args:\n url: The URL to load\n\n Returns:\n RecursiveUrlLoader: Configured loader instance\n \"\"\"\n headers_dict = {header[\"key\"]: header[\"value\"] for header in self.headers}\n extractor = (lambda x: x) if self.format == \"HTML\" else (lambda x: BeautifulSoup(x, \"lxml\").get_text())\n\n return RecursiveUrlLoader(\n url=url,\n max_depth=self.max_depth,\n prevent_outside=self.prevent_outside,\n use_async=self.use_async,\n extractor=extractor,\n timeout=self.timeout,\n headers=headers_dict,\n check_response_status=self.check_response_status,\n continue_on_failure=self.continue_on_failure,\n base_url=url, # Add base_url to ensure consistent domain crawling\n autoset_encoding=self.autoset_encoding, # Enable automatic encoding detection\n exclude_dirs=[], # Allow customization of excluded 
directories\n link_regex=None, # Allow customization of link filtering\n )\n\n def fetch_url_contents(self) -> list[dict]:\n \"\"\"Load documents from the configured URLs.\n\n Returns:\n List[Data]: List of Data objects containing the fetched content\n\n Raises:\n ValueError: If no valid URLs are provided or if there's an error loading documents\n \"\"\"\n try:\n urls = list({self.ensure_url(url) for url in self.urls if url.strip()})\n logger.info(f\"URLs: {urls}\")\n if not urls:\n msg = \"No valid URLs provided.\"\n raise ValueError(msg)\n\n all_docs = []\n for url in urls:\n logger.info(f\"Loading documents from {url}\")\n\n try:\n loader = self._create_loader(url)\n docs = loader.load()\n\n if not docs:\n logger.warning(f\"No documents found for {url}\")\n continue\n\n logger.info(f\"Found {len(docs)} documents from {url}\")\n all_docs.extend(docs)\n\n except requests.exceptions.RequestException as e:\n logger.exception(f\"Error loading documents from {url}: {e}\")\n continue\n\n if not all_docs:\n msg = \"No documents were successfully loaded from any URL\"\n raise ValueError(msg)\n\n # data = [Data(text=doc.page_content, **doc.metadata) for doc in all_docs]\n data = [\n {\n \"text\": safe_convert(doc.page_content, clean_data=True),\n \"url\": doc.metadata.get(\"source\", \"\"),\n \"title\": doc.metadata.get(\"title\", \"\"),\n \"description\": doc.metadata.get(\"description\", \"\"),\n \"content_type\": doc.metadata.get(\"content_type\", \"\"),\n \"language\": doc.metadata.get(\"language\", \"\"),\n }\n for doc in all_docs\n ]\n except Exception as e:\n error_msg = e.message if hasattr(e, \"message\") else e\n msg = f\"Error loading documents: {error_msg!s}\"\n logger.exception(msg)\n raise ValueError(msg) from e\n return data\n\n def fetch_content(self) -> DataFrame:\n \"\"\"Convert the documents to a DataFrame.\"\"\"\n return DataFrame(data=self.fetch_url_contents())\n\n def as_message(self) -> Message:\n \"\"\"Convert the documents to a Message.\"\"\"\n url_contents = self.fetch_url_contents()\n return Message(text=\"\\n\\n\".join([x[\"text\"] for x in url_contents]), data={\"data\": url_contents})\n"
+ "value": "import re\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom langchain_community.document_loaders import RecursiveUrlLoader\nfrom loguru import logger\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.helpers.data import safe_convert\nfrom langflow.io import BoolInput, DropdownInput, IntInput, MessageTextInput, Output, SliderInput, TableInput\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.services.deps import get_settings_service\n\n# Constants\nDEFAULT_TIMEOUT = 30\nDEFAULT_MAX_DEPTH = 1\nDEFAULT_FORMAT = \"Text\"\nURL_REGEX = re.compile(\n r\"^(https?:\\/\\/)?\" r\"(www\\.)?\" r\"([a-zA-Z0-9.-]+)\" r\"(\\.[a-zA-Z]{2,})?\" r\"(:\\d+)?\" r\"(\\/[^\\s]*)?$\",\n re.IGNORECASE,\n)\n\n\nclass URLComponent(Component):\n \"\"\"A component that loads and parses content from web pages recursively.\n\n This component allows fetching content from one or more URLs, with options to:\n - Control crawl depth\n - Prevent crawling outside the root domain\n - Use async loading for better performance\n - Extract either raw HTML or clean text\n - Configure request headers and timeouts\n \"\"\"\n\n display_name = \"URL\"\n description = \"Fetch content from one or more web pages, following links recursively.\"\n icon = \"layout-template\"\n name = \"URLComponent\"\n\n inputs = [\n MessageTextInput(\n name=\"urls\",\n display_name=\"URLs\",\n info=\"Enter one or more URLs to crawl recursively, by clicking the '+' button.\",\n is_list=True,\n tool_mode=True,\n placeholder=\"Enter a URL...\",\n list_add_label=\"Add URL\",\n input_types=[],\n ),\n SliderInput(\n name=\"max_depth\",\n display_name=\"Depth\",\n info=(\n \"Controls how many 'clicks' away from the initial page the crawler will go:\\n\"\n \"- depth 1: only the initial page\\n\"\n \"- depth 2: initial page + all pages linked directly from it\\n\"\n \"- depth 3: initial page + direct links + links found on those direct link pages\\n\"\n \"Note: This is about link traversal, not URL path depth.\"\n ),\n value=DEFAULT_MAX_DEPTH,\n range_spec=RangeSpec(min=1, max=5, step=1),\n required=False,\n min_label=\" \",\n max_label=\" \",\n min_label_icon=\"None\",\n max_label_icon=\"None\",\n # slider_input=True\n ),\n BoolInput(\n name=\"prevent_outside\",\n display_name=\"Prevent Outside\",\n info=(\n \"If enabled, only crawls URLs within the same domain as the root URL. \"\n \"This helps prevent the crawler from going to external websites.\"\n ),\n value=True,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"use_async\",\n display_name=\"Use Async\",\n info=(\n \"If enabled, uses asynchronous loading which can be significantly faster \"\n \"but might use more system resources.\"\n ),\n value=True,\n required=False,\n advanced=True,\n ),\n DropdownInput(\n name=\"format\",\n display_name=\"Output Format\",\n info=\"Output Format. 
Use 'Text' to extract the text from the HTML or 'HTML' for the raw HTML content.\",\n options=[\"Text\", \"HTML\"],\n value=DEFAULT_FORMAT,\n advanced=True,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"Timeout for the request in seconds.\",\n value=DEFAULT_TIMEOUT,\n required=False,\n advanced=True,\n ),\n TableInput(\n name=\"headers\",\n display_name=\"Headers\",\n info=\"The headers to send with the request\",\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Header\",\n \"type\": \"str\",\n \"description\": \"Header name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Header value\",\n },\n ],\n value=[{\"key\": \"User-Agent\", \"value\": get_settings_service().settings.user_agent}],\n advanced=True,\n input_types=[\"DataFrame\"],\n ),\n BoolInput(\n name=\"filter_text_html\",\n display_name=\"Filter Text/HTML\",\n info=\"If enabled, filters out text/css content type from the results.\",\n value=True,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"continue_on_failure\",\n display_name=\"Continue on Failure\",\n info=\"If enabled, continues crawling even if some requests fail.\",\n value=True,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"check_response_status\",\n display_name=\"Check Response Status\",\n info=\"If enabled, checks the response status of the request.\",\n value=False,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"autoset_encoding\",\n display_name=\"Autoset Encoding\",\n info=\"If enabled, automatically sets the encoding of the request.\",\n value=True,\n required=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Result\", name=\"page_results\", method=\"fetch_content\"),\n Output(display_name=\"Raw Result\", name=\"raw_results\", method=\"as_message\"),\n ]\n\n @staticmethod\n def validate_url(url: str) -> bool:\n \"\"\"Validates if the given string matches URL pattern.\n\n Args:\n url: The URL string to validate\n\n Returns:\n bool: True if the URL is valid, False otherwise\n \"\"\"\n return bool(URL_REGEX.match(url))\n\n def ensure_url(self, url: str) -> str:\n \"\"\"Ensures the given string is a valid URL.\n\n Args:\n url: The URL string to validate and normalize\n\n Returns:\n str: The normalized URL\n\n Raises:\n ValueError: If the URL is invalid\n \"\"\"\n url = url.strip()\n if not url.startswith((\"http://\", \"https://\")):\n url = \"https://\" + url\n\n if not self.validate_url(url):\n msg = f\"Invalid URL: {url}\"\n raise ValueError(msg)\n\n return url\n\n def _create_loader(self, url: str) -> RecursiveUrlLoader:\n \"\"\"Creates a RecursiveUrlLoader instance with the configured settings.\n\n Args:\n url: The URL to load\n\n Returns:\n RecursiveUrlLoader: Configured loader instance\n \"\"\"\n headers_dict = {header[\"key\"]: header[\"value\"] for header in self.headers}\n extractor = (lambda x: x) if self.format == \"HTML\" else (lambda x: BeautifulSoup(x, \"lxml\").get_text())\n\n return RecursiveUrlLoader(\n url=url,\n max_depth=self.max_depth,\n prevent_outside=self.prevent_outside,\n use_async=self.use_async,\n extractor=extractor,\n timeout=self.timeout,\n headers=headers_dict,\n check_response_status=self.check_response_status,\n continue_on_failure=self.continue_on_failure,\n base_url=url, # Add base_url to ensure consistent domain crawling\n autoset_encoding=self.autoset_encoding, # Enable automatic encoding detection\n exclude_dirs=[], # Allow customization of excluded 
directories\n link_regex=None, # Allow customization of link filtering\n )\n\n def fetch_url_contents(self) -> list[dict]:\n \"\"\"Load documents from the configured URLs.\n\n Returns:\n List[Data]: List of Data objects containing the fetched content\n\n Raises:\n ValueError: If no valid URLs are provided or if there's an error loading documents\n \"\"\"\n try:\n urls = list({self.ensure_url(url) for url in self.urls if url.strip()})\n logger.info(f\"URLs: {urls}\")\n if not urls:\n msg = \"No valid URLs provided.\"\n raise ValueError(msg)\n\n all_docs = []\n for url in urls:\n logger.info(f\"Loading documents from {url}\")\n\n try:\n loader = self._create_loader(url)\n docs = loader.load()\n\n if not docs:\n logger.warning(f\"No documents found for {url}\")\n continue\n\n logger.info(f\"Found {len(docs)} documents from {url}\")\n all_docs.extend(docs)\n\n except requests.exceptions.RequestException as e:\n logger.exception(f\"Error loading documents from {url}: {e}\")\n continue\n\n if not all_docs:\n msg = \"No documents were successfully loaded from any URL\"\n raise ValueError(msg)\n\n # data = [Data(text=doc.page_content, **doc.metadata) for doc in all_docs]\n data = [\n {\n \"text\": safe_convert(doc.page_content, clean_data=True),\n \"url\": doc.metadata.get(\"source\", \"\"),\n \"title\": doc.metadata.get(\"title\", \"\"),\n \"description\": doc.metadata.get(\"description\", \"\"),\n \"content_type\": doc.metadata.get(\"content_type\", \"\"),\n \"language\": doc.metadata.get(\"language\", \"\"),\n }\n for doc in all_docs\n ]\n except Exception as e:\n error_msg = e.message if hasattr(e, \"message\") else e\n msg = f\"Error loading documents: {error_msg!s}\"\n logger.exception(msg)\n raise ValueError(msg) from e\n return data\n\n def fetch_content(self) -> DataFrame:\n \"\"\"Convert the documents to a DataFrame.\"\"\"\n return DataFrame(data=self.fetch_url_contents())\n\n def as_message(self) -> Message:\n \"\"\"Convert the documents to a Message.\"\"\"\n url_contents = self.fetch_url_contents()\n return Message(text=\"\\n\\n\".join([x[\"text\"] for x in url_contents]), data={\"data\": url_contents})\n"
},
"continue_on_failure": {
"_input_type": "BoolInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Custom Component Maker.json b/src/backend/base/langflow/initial_setup/starter_projects/Custom Component Maker.json
index 22e4f2072..810d35aec 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Custom Component Maker.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Custom Component Maker.json
@@ -297,7 +297,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from typing import Any, cast\n\nfrom langflow.custom import Component\nfrom langflow.inputs import HandleInput\nfrom langflow.io import DropdownInput, IntInput, MessageTextInput, MultilineInput, Output, TabInput\nfrom langflow.memory import aget_messages, astore_message\nfrom langflow.schema import Data, dotdict\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.utils.component_utils import set_current_fields, set_field_display\nfrom langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_AI, MESSAGE_SENDER_USER\n\n\nclass MemoryComponent(Component):\n display_name = \"Message History\"\n description = \"Stores or retrieves stored chat messages from Langflow tables or an external memory.\"\n icon = \"message-square-more\"\n name = \"Memory\"\n default_keys = [\"mode\", \"memory\"]\n mode_config = {\n \"Store\": [\"message\", \"memory\", \"sender\", \"sender_name\", \"session_id\"],\n \"Retrieve\": [\"n_messages\", \"order\", \"template\", \"memory\"],\n }\n\n inputs = [\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Retrieve\", \"Store\"],\n value=\"Retrieve\",\n info=\"Operation mode: Store messages or Retrieve messages.\",\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"message\",\n display_name=\"Message\",\n info=\"The chat message to be stored.\",\n tool_mode=True,\n dynamic=True,\n show=False,\n ),\n HandleInput(\n name=\"memory\",\n display_name=\"External Memory\",\n input_types=[\"Memory\"],\n info=\"Retrieve messages from an external memory. If empty, it will use the Langflow tables.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER, \"Machine and User\"],\n value=\"Machine and User\",\n info=\"Filter by sender type.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Filter by sender name.\",\n advanced=True,\n show=False,\n ),\n IntInput(\n name=\"n_messages\",\n display_name=\"Number of Messages\",\n value=100,\n info=\"Number of messages to retrieve.\",\n advanced=True,\n show=False,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"order\",\n display_name=\"Order\",\n options=[\"Ascending\", \"Descending\"],\n value=\"Ascending\",\n info=\"Order of the messages.\",\n advanced=True,\n tool_mode=True,\n required=True,\n show=False,\n ),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. 
\"\n \"It can contain the keys {text}, {sender} or any other key in the message data.\",\n value=\"{sender_name}: {text}\",\n advanced=True,\n show=False,\n ),\n ]\n\n outputs = [Output(display_name=\"Messages\", name=\"dataframe\", method=\"retrieve_messages_dataframe\", dynamic=True)]\n\n def update_outputs(self, frontend_node: dict, field_name: str, field_value: Any) -> dict:\n \"\"\"Dynamically show only the relevant output based on the selected output type.\"\"\"\n if field_name == \"mode\":\n # Start with empty outputs\n frontend_node[\"outputs\"] = []\n if field_value == \"Store\":\n frontend_node[\"outputs\"] = [\n Output(\n display_name=\"Stored Messages\",\n name=\"stored_messages\",\n method=\"store_message\",\n hidden=True,\n dynamic=True,\n )\n ]\n if field_value == \"Retrieve\":\n frontend_node[\"outputs\"] = [\n Output(\n display_name=\"Messages\", name=\"dataframe\", method=\"retrieve_messages_dataframe\", dynamic=True\n )\n ]\n return frontend_node\n\n async def retrieve_messages(self) -> Data:\n sender = self.sender\n sender_name = self.sender_name\n session_id = self.session_id\n n_messages = self.n_messages\n order = \"DESC\" if self.order == \"Descending\" else \"ASC\"\n\n if sender == \"Machine and User\":\n sender = None\n\n if self.memory and not hasattr(self.memory, \"aget_messages\"):\n memory_name = type(self.memory).__name__\n err_msg = f\"External Memory object ({memory_name}) must have 'aget_messages' method.\"\n raise AttributeError(err_msg)\n # Check if n_messages is None or 0\n if n_messages == 0:\n stored = []\n elif self.memory:\n # override session_id\n self.memory.session_id = session_id\n\n stored = await self.memory.aget_messages()\n # langchain memories are supposed to return messages in ascending order\n if order == \"DESC\":\n stored = stored[::-1]\n if n_messages:\n stored = stored[:n_messages]\n stored = [Message.from_lc_message(m) for m in stored]\n if sender:\n expected_type = MESSAGE_SENDER_AI if sender == MESSAGE_SENDER_AI else MESSAGE_SENDER_USER\n stored = [m for m in stored if m.type == expected_type]\n else:\n stored = await aget_messages(\n sender=sender,\n sender_name=sender_name,\n session_id=session_id,\n limit=n_messages,\n order=order,\n )\n self.status = stored\n return cast(Data, stored)\n\n async def retrieve_messages_dataframe(self) -> DataFrame:\n \"\"\"Convert the retrieved messages into a DataFrame.\n\n Returns:\n DataFrame: A DataFrame containing the message data.\n \"\"\"\n messages = await self.retrieve_messages()\n return DataFrame(messages)\n\n async def store_message(self) -> Message:\n message = Message(text=self.message) if isinstance(self.message, str) else self.message\n\n message.session_id = self.session_id or message.session_id\n message.sender = self.sender or message.sender or MESSAGE_SENDER_AI\n message.sender_name = self.sender_name or message.sender_name or MESSAGE_SENDER_NAME_AI\n\n stored_messages: list[Message] = []\n\n if self.memory:\n self.memory.session_id = message.session_id\n lc_message = message.to_lc_message()\n await self.memory.aadd_messages([lc_message])\n\n stored_messages = await self.memory.aget_messages() or []\n\n stored_messages = [Message.from_lc_message(m) for m in stored_messages] if stored_messages else []\n\n if message.sender:\n stored_messages = [m for m in stored_messages if m.sender == message.sender]\n else:\n await astore_message(message, flow_id=self.graph.flow_id)\n stored_messages = (\n await aget_messages(\n session_id=message.session_id, sender_name=message.sender_name, 
sender=message.sender\n )\n or []\n )\n\n if not stored_messages:\n msg = \"No messages were stored. Please ensure that the session ID and sender are properly set.\"\n raise ValueError(msg)\n\n stored_message = stored_messages[0]\n self.status = stored_message\n return stored_message\n\n def update_build_config(\n self,\n build_config: dotdict,\n field_value: Any, # noqa: ARG002\n field_name: str | None = None, # noqa: ARG002\n ) -> dotdict:\n return set_current_fields(\n build_config=build_config,\n action_fields=self.mode_config,\n selected_action=build_config[\"mode\"][\"value\"],\n default_fields=self.default_keys,\n func=set_field_display,\n )\n"
+ "value": "from typing import Any, cast\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DropdownInput, HandleInput, IntInput, MessageTextInput, MultilineInput, TabInput\nfrom langflow.memory import aget_messages, astore_message\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\nfrom langflow.template.field.base import Output\nfrom langflow.utils.component_utils import set_current_fields, set_field_display\nfrom langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_AI, MESSAGE_SENDER_USER\n\n\nclass MemoryComponent(Component):\n display_name = \"Message History\"\n description = \"Stores or retrieves stored chat messages from Langflow tables or an external memory.\"\n icon = \"message-square-more\"\n name = \"Memory\"\n default_keys = [\"mode\", \"memory\"]\n mode_config = {\n \"Store\": [\"message\", \"memory\", \"sender\", \"sender_name\", \"session_id\"],\n \"Retrieve\": [\"n_messages\", \"order\", \"template\", \"memory\"],\n }\n\n inputs = [\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Retrieve\", \"Store\"],\n value=\"Retrieve\",\n info=\"Operation mode: Store messages or Retrieve messages.\",\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"message\",\n display_name=\"Message\",\n info=\"The chat message to be stored.\",\n tool_mode=True,\n dynamic=True,\n show=False,\n ),\n HandleInput(\n name=\"memory\",\n display_name=\"External Memory\",\n input_types=[\"Memory\"],\n info=\"Retrieve messages from an external memory. If empty, it will use the Langflow tables.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER, \"Machine and User\"],\n value=\"Machine and User\",\n info=\"Filter by sender type.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Filter by sender name.\",\n advanced=True,\n show=False,\n ),\n IntInput(\n name=\"n_messages\",\n display_name=\"Number of Messages\",\n value=100,\n info=\"Number of messages to retrieve.\",\n advanced=True,\n show=False,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"order\",\n display_name=\"Order\",\n options=[\"Ascending\", \"Descending\"],\n value=\"Ascending\",\n info=\"Order of the messages.\",\n advanced=True,\n tool_mode=True,\n required=True,\n show=False,\n ),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. 
\"\n \"It can contain the keys {text}, {sender} or any other key in the message data.\",\n value=\"{sender_name}: {text}\",\n advanced=True,\n show=False,\n ),\n ]\n\n outputs = [Output(display_name=\"Messages\", name=\"dataframe\", method=\"retrieve_messages_dataframe\", dynamic=True)]\n\n def update_outputs(self, frontend_node: dict, field_name: str, field_value: Any) -> dict:\n \"\"\"Dynamically show only the relevant output based on the selected output type.\"\"\"\n if field_name == \"mode\":\n # Start with empty outputs\n frontend_node[\"outputs\"] = []\n if field_value == \"Store\":\n frontend_node[\"outputs\"] = [\n Output(\n display_name=\"Stored Messages\",\n name=\"stored_messages\",\n method=\"store_message\",\n hidden=True,\n dynamic=True,\n )\n ]\n if field_value == \"Retrieve\":\n frontend_node[\"outputs\"] = [\n Output(\n display_name=\"Messages\", name=\"dataframe\", method=\"retrieve_messages_dataframe\", dynamic=True\n )\n ]\n return frontend_node\n\n async def retrieve_messages(self) -> Data:\n sender = self.sender\n sender_name = self.sender_name\n session_id = self.session_id\n n_messages = self.n_messages\n order = \"DESC\" if self.order == \"Descending\" else \"ASC\"\n\n if sender == \"Machine and User\":\n sender = None\n\n if self.memory and not hasattr(self.memory, \"aget_messages\"):\n memory_name = type(self.memory).__name__\n err_msg = f\"External Memory object ({memory_name}) must have 'aget_messages' method.\"\n raise AttributeError(err_msg)\n # Check if n_messages is None or 0\n if n_messages == 0:\n stored = []\n elif self.memory:\n # override session_id\n self.memory.session_id = session_id\n\n stored = await self.memory.aget_messages()\n # langchain memories are supposed to return messages in ascending order\n if order == \"DESC\":\n stored = stored[::-1]\n if n_messages:\n stored = stored[:n_messages]\n stored = [Message.from_lc_message(m) for m in stored]\n if sender:\n expected_type = MESSAGE_SENDER_AI if sender == MESSAGE_SENDER_AI else MESSAGE_SENDER_USER\n stored = [m for m in stored if m.type == expected_type]\n else:\n stored = await aget_messages(\n sender=sender,\n sender_name=sender_name,\n session_id=session_id,\n limit=n_messages,\n order=order,\n )\n self.status = stored\n return cast(Data, stored)\n\n async def retrieve_messages_dataframe(self) -> DataFrame:\n \"\"\"Convert the retrieved messages into a DataFrame.\n\n Returns:\n DataFrame: A DataFrame containing the message data.\n \"\"\"\n messages = await self.retrieve_messages()\n return DataFrame(messages)\n\n async def store_message(self) -> Message:\n message = Message(text=self.message) if isinstance(self.message, str) else self.message\n\n message.session_id = self.session_id or message.session_id\n message.sender = self.sender or message.sender or MESSAGE_SENDER_AI\n message.sender_name = self.sender_name or message.sender_name or MESSAGE_SENDER_NAME_AI\n\n stored_messages: list[Message] = []\n\n if self.memory:\n self.memory.session_id = message.session_id\n lc_message = message.to_lc_message()\n await self.memory.aadd_messages([lc_message])\n\n stored_messages = await self.memory.aget_messages() or []\n\n stored_messages = [Message.from_lc_message(m) for m in stored_messages] if stored_messages else []\n\n if message.sender:\n stored_messages = [m for m in stored_messages if m.sender == message.sender]\n else:\n await astore_message(message, flow_id=self.graph.flow_id)\n stored_messages = (\n await aget_messages(\n session_id=message.session_id, sender_name=message.sender_name, 
sender=message.sender\n )\n or []\n )\n\n if not stored_messages:\n msg = \"No messages were stored. Please ensure that the session ID and sender are properly set.\"\n raise ValueError(msg)\n\n stored_message = stored_messages[0]\n self.status = stored_message\n return stored_message\n\n def update_build_config(\n self,\n build_config: dotdict,\n field_value: Any, # noqa: ARG002\n field_name: str | None = None, # noqa: ARG002\n ) -> dotdict:\n return set_current_fields(\n build_config=build_config,\n action_fields=self.mode_config,\n selected_action=build_config[\"mode\"][\"value\"],\n default_fields=self.default_keys,\n func=set_field_display,\n )\n"
},
"memory": {
"_input_type": "HandleInput",
@@ -695,7 +695,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"template": {
"_input_type": "PromptInput",
@@ -1870,7 +1870,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
+ "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"_input_type": "FileInput",
@@ -2201,7 +2201,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Diet Analysis.json b/src/backend/base/langflow/initial_setup/starter_projects/Diet Analysis.json
index 1684d8d5f..f7c60660e 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Diet Analysis.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Diet Analysis.json
@@ -202,7 +202,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
+ "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"_input_type": "FileInput",
@@ -467,7 +467,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"template": {
"_input_type": "PromptInput",
@@ -640,7 +640,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "import requests\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\nfrom typing_extensions import override\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.novita_constants import MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n IntInput,\n SecretStrInput,\n SliderInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass NovitaModelComponent(LCModelComponent):\n display_name = \"Novita AI\"\n description = \"Generates text using Novita AI LLMs (OpenAI compatible).\"\n icon = \"Novita\"\n name = \"NovitaModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=MODEL_NAMES,\n value=MODEL_NAMES[0],\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Novita API Key\",\n info=\"The Novita API Key to use for Novita AI models.\",\n advanced=False,\n value=\"NOVITA_API_KEY\",\n real_time_refresh=True,\n ),\n SliderInput(name=\"temperature\", display_name=\"Temperature\", value=0.1, range_spec=RangeSpec(min=0, max=1)),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def get_models(self) -> list[str]:\n base_url = \"https://api.novita.ai/v3/openai\"\n url = f\"{base_url}/models\"\n\n headers = {\"Content-Type\": \"application/json\"}\n\n try:\n response = requests.get(url, headers=headers, timeout=10)\n response.raise_for_status()\n model_list = response.json()\n return [model[\"id\"] for model in model_list.get(\"data\", [])]\n except requests.RequestException as e:\n self.status = f\"Error fetching models: {e}\"\n return MODEL_NAMES\n\n @override\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n if field_name in {\"api_key\", \"model_name\"}:\n models = self.get_models()\n build_config[\"model_name\"][\"options\"] = models\n return build_config\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n json_mode = self.json_mode\n seed = self.seed\n\n try:\n output = ChatOpenAI(\n model=model_name,\n api_key=(SecretStr(api_key).get_secret_value() if api_key else None),\n max_tokens=max_tokens or None,\n temperature=temperature,\n model_kwargs=model_kwargs,\n streaming=self.stream,\n seed=seed,\n base_url=\"https://api.novita.ai/v3/openai\",\n )\n except Exception as e:\n msg = \"Could not connect to 
Novita API.\"\n raise ValueError(msg) from e\n\n if json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n"
+ "value": "import requests\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\nfrom typing_extensions import override\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.novita_constants import MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n HandleInput,\n IntInput,\n SecretStrInput,\n SliderInput,\n)\n\n\nclass NovitaModelComponent(LCModelComponent):\n display_name = \"Novita AI\"\n description = \"Generates text using Novita AI LLMs (OpenAI compatible).\"\n icon = \"Novita\"\n name = \"NovitaModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=MODEL_NAMES,\n value=MODEL_NAMES[0],\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Novita API Key\",\n info=\"The Novita API Key to use for Novita AI models.\",\n advanced=False,\n value=\"NOVITA_API_KEY\",\n real_time_refresh=True,\n ),\n SliderInput(name=\"temperature\", display_name=\"Temperature\", value=0.1, range_spec=RangeSpec(min=0, max=1)),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def get_models(self) -> list[str]:\n base_url = \"https://api.novita.ai/v3/openai\"\n url = f\"{base_url}/models\"\n\n headers = {\"Content-Type\": \"application/json\"}\n\n try:\n response = requests.get(url, headers=headers, timeout=10)\n response.raise_for_status()\n model_list = response.json()\n return [model[\"id\"] for model in model_list.get(\"data\", [])]\n except requests.RequestException as e:\n self.status = f\"Error fetching models: {e}\"\n return MODEL_NAMES\n\n @override\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n if field_name in {\"api_key\", \"model_name\"}:\n models = self.get_models()\n build_config[\"model_name\"][\"options\"] = models\n return build_config\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n json_mode = self.json_mode\n seed = self.seed\n\n try:\n output = ChatOpenAI(\n model=model_name,\n api_key=(SecretStr(api_key).get_secret_value() if api_key else None),\n max_tokens=max_tokens or None,\n temperature=temperature,\n model_kwargs=model_kwargs,\n streaming=self.stream,\n seed=seed,\n base_url=\"https://api.novita.ai/v3/openai\",\n )\n except Exception as e:\n msg = \"Could not connect to Novita API.\"\n raise 
ValueError(msg) from e\n\n if json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n"
},
"input_value": {
"_input_type": "MessageInput",
@@ -1037,7 +1037,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Document Q&A.json b/src/backend/base/langflow/initial_setup/starter_projects/Document Q&A.json
index 45a4a1efa..bc6610a28 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Document Q&A.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Document Q&A.json
@@ -252,7 +252,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
+ "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"advanced": true,
@@ -563,7 +563,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
@@ -834,7 +834,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data import BaseFileComponent\nfrom langflow.base.data.utils import TEXT_FILE_TYPES, parallel_load_data, parse_text_file_to_data\nfrom langflow.io import BoolInput, IntInput\nfrom langflow.schema import Data\n\n\nclass FileComponent(BaseFileComponent):\n \"\"\"Handles loading and processing of individual or zipped text files.\n\n This component supports processing multiple valid files within a zip archive,\n resolving paths, validating file types, and optionally using multithreading for processing.\n \"\"\"\n\n display_name = \"File\"\n description = \"Loads content from one or more files as a DataFrame.\"\n icon = \"file-text\"\n name = \"File\"\n\n VALID_EXTENSIONS = TEXT_FILE_TYPES\n\n inputs = [\n *BaseFileComponent._base_inputs,\n BoolInput(\n name=\"use_multithreading\",\n display_name=\"[Deprecated] Use Multithreading\",\n advanced=True,\n value=True,\n info=\"Set 'Processing Concurrency' greater than 1 to enable multithreading.\",\n ),\n IntInput(\n name=\"concurrency_multithreading\",\n display_name=\"Processing Concurrency\",\n advanced=True,\n info=\"When multiple files are being processed, the number of files to process concurrently.\",\n value=1,\n ),\n ]\n\n outputs = [\n *BaseFileComponent._base_outputs,\n ]\n\n def process_files(self, file_list: list[BaseFileComponent.BaseFile]) -> list[BaseFileComponent.BaseFile]:\n \"\"\"Processes files either sequentially or in parallel, depending on concurrency settings.\n\n Args:\n file_list (list[BaseFileComponent.BaseFile]): List of files to process.\n\n Returns:\n list[BaseFileComponent.BaseFile]: Updated list of files with merged data.\n \"\"\"\n\n def process_file(file_path: str, *, silent_errors: bool = False) -> Data | None:\n \"\"\"Processes a single file and returns its Data object.\"\"\"\n try:\n return parse_text_file_to_data(file_path, silent_errors=silent_errors)\n except FileNotFoundError as e:\n msg = f\"File not found: {file_path}. Error: {e}\"\n self.log(msg)\n if not silent_errors:\n raise\n return None\n except Exception as e:\n msg = f\"Unexpected error processing {file_path}: {e}\"\n self.log(msg)\n if not silent_errors:\n raise\n return None\n\n if not file_list:\n msg = \"No files to process.\"\n raise ValueError(msg)\n\n concurrency = 1 if not self.use_multithreading else max(1, self.concurrency_multithreading)\n file_count = len(file_list)\n\n parallel_processing_threshold = 2\n if concurrency < parallel_processing_threshold or file_count < parallel_processing_threshold:\n if file_count > 1:\n self.log(f\"Processing {file_count} files sequentially.\")\n processed_data = [process_file(str(file.path), silent_errors=self.silent_errors) for file in file_list]\n else:\n self.log(f\"Starting parallel processing of {file_count} files with concurrency: {concurrency}.\")\n file_paths = [str(file.path) for file in file_list]\n processed_data = parallel_load_data(\n file_paths,\n silent_errors=self.silent_errors,\n load_function=process_file,\n max_concurrency=concurrency,\n )\n\n # Use rollup_basefile_data to merge processed data with BaseFile objects\n return self.rollup_data(file_list, processed_data)\n"
+ "value": "from langflow.base.data.base_file import BaseFileComponent\nfrom langflow.base.data.utils import TEXT_FILE_TYPES, parallel_load_data, parse_text_file_to_data\nfrom langflow.io import BoolInput, IntInput\nfrom langflow.schema.data import Data\n\n\nclass FileComponent(BaseFileComponent):\n \"\"\"Handles loading and processing of individual or zipped text files.\n\n This component supports processing multiple valid files within a zip archive,\n resolving paths, validating file types, and optionally using multithreading for processing.\n \"\"\"\n\n display_name = \"File\"\n description = \"Loads content from one or more files as a DataFrame.\"\n icon = \"file-text\"\n name = \"File\"\n\n VALID_EXTENSIONS = TEXT_FILE_TYPES\n\n inputs = [\n *BaseFileComponent._base_inputs,\n BoolInput(\n name=\"use_multithreading\",\n display_name=\"[Deprecated] Use Multithreading\",\n advanced=True,\n value=True,\n info=\"Set 'Processing Concurrency' greater than 1 to enable multithreading.\",\n ),\n IntInput(\n name=\"concurrency_multithreading\",\n display_name=\"Processing Concurrency\",\n advanced=True,\n info=\"When multiple files are being processed, the number of files to process concurrently.\",\n value=1,\n ),\n ]\n\n outputs = [\n *BaseFileComponent._base_outputs,\n ]\n\n def process_files(self, file_list: list[BaseFileComponent.BaseFile]) -> list[BaseFileComponent.BaseFile]:\n \"\"\"Processes files either sequentially or in parallel, depending on concurrency settings.\n\n Args:\n file_list (list[BaseFileComponent.BaseFile]): List of files to process.\n\n Returns:\n list[BaseFileComponent.BaseFile]: Updated list of files with merged data.\n \"\"\"\n\n def process_file(file_path: str, *, silent_errors: bool = False) -> Data | None:\n \"\"\"Processes a single file and returns its Data object.\"\"\"\n try:\n return parse_text_file_to_data(file_path, silent_errors=silent_errors)\n except FileNotFoundError as e:\n msg = f\"File not found: {file_path}. Error: {e}\"\n self.log(msg)\n if not silent_errors:\n raise\n return None\n except Exception as e:\n msg = f\"Unexpected error processing {file_path}: {e}\"\n self.log(msg)\n if not silent_errors:\n raise\n return None\n\n if not file_list:\n msg = \"No files to process.\"\n raise ValueError(msg)\n\n concurrency = 1 if not self.use_multithreading else max(1, self.concurrency_multithreading)\n file_count = len(file_list)\n\n parallel_processing_threshold = 2\n if concurrency < parallel_processing_threshold or file_count < parallel_processing_threshold:\n if file_count > 1:\n self.log(f\"Processing {file_count} files sequentially.\")\n processed_data = [process_file(str(file.path), silent_errors=self.silent_errors) for file in file_list]\n else:\n self.log(f\"Starting parallel processing of {file_count} files with concurrency: {concurrency}.\")\n file_paths = [str(file.path) for file in file_list]\n processed_data = parallel_load_data(\n file_paths,\n silent_errors=self.silent_errors,\n load_function=process_file,\n max_concurrency=concurrency,\n )\n\n # Use rollup_basefile_data to merge processed data with BaseFile objects\n return self.rollup_data(file_list, processed_data)\n"
},
"concurrency_multithreading": {
"_input_type": "IntInput",
@@ -1130,7 +1130,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"template": {
"advanced": false,
@@ -1308,7 +1308,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
+ "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
},
"input_value": {
"_input_type": "MessageInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Financial Agent.json b/src/backend/base/langflow/initial_setup/starter_projects/Financial Agent.json
index 63894033f..885454ea6 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Financial Agent.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Financial Agent.json
@@ -425,7 +425,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
+ "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"_input_type": "FileInput",
@@ -725,7 +725,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "import httpx\nfrom loguru import logger\n\nfrom langflow.custom import Component\nfrom langflow.io import BoolInput, DropdownInput, IntInput, MessageTextInput, Output, SecretStrInput\nfrom langflow.schema import Data\nfrom langflow.schema.dataframe import DataFrame\n\n\nclass TavilySearchComponent(Component):\n display_name = \"Tavily Search API\"\n description = \"\"\"**Tavily Search** is a search engine optimized for LLMs and RAG, \\\n aimed at efficient, quick, and persistent search results.\"\"\"\n icon = \"TavilyIcon\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Tavily API Key\",\n required=True,\n info=\"Your Tavily API Key.\",\n ),\n MessageTextInput(\n name=\"query\",\n display_name=\"Search Query\",\n info=\"The search query you want to execute with Tavily.\",\n tool_mode=True,\n ),\n DropdownInput(\n name=\"search_depth\",\n display_name=\"Search Depth\",\n info=\"The depth of the search.\",\n options=[\"basic\", \"advanced\"],\n value=\"advanced\",\n advanced=True,\n ),\n IntInput(\n name=\"chunks_per_source\",\n display_name=\"Chunks Per Source\",\n info=(\"The number of content chunks to retrieve from each source (1-3). Only works with advanced search.\"),\n value=3,\n advanced=True,\n ),\n DropdownInput(\n name=\"topic\",\n display_name=\"Search Topic\",\n info=\"The category of the search.\",\n options=[\"general\", \"news\"],\n value=\"general\",\n advanced=True,\n ),\n IntInput(\n name=\"days\",\n display_name=\"Days\",\n info=\"Number of days back from current date to include. Only available with news topic.\",\n value=7,\n advanced=True,\n ),\n IntInput(\n name=\"max_results\",\n display_name=\"Max Results\",\n info=\"The maximum number of search results to return.\",\n value=5,\n advanced=True,\n ),\n BoolInput(\n name=\"include_answer\",\n display_name=\"Include Answer\",\n info=\"Include a short answer to original query.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"time_range\",\n display_name=\"Time Range\",\n info=\"The time range back from the current date to filter results.\",\n options=[\"day\", \"week\", \"month\", \"year\"],\n value=None, # Default to None to make it optional\n advanced=True,\n ),\n BoolInput(\n name=\"include_images\",\n display_name=\"Include Images\",\n info=\"Include a list of query-related images in the response.\",\n value=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"include_domains\",\n display_name=\"Include Domains\",\n info=\"Comma-separated list of domains to include in the search results.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"exclude_domains\",\n display_name=\"Exclude Domains\",\n info=\"Comma-separated list of domains to exclude from the search results.\",\n advanced=True,\n ),\n BoolInput(\n name=\"include_raw_content\",\n display_name=\"Include Raw Content\",\n info=\"Include the cleaned and parsed HTML content of each search result.\",\n value=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"fetch_content_dataframe\"),\n ]\n\n def fetch_content(self) -> list[Data]:\n try:\n # Only process domains if they're provided\n include_domains = None\n exclude_domains = None\n\n if self.include_domains:\n include_domains = [domain.strip() for domain in self.include_domains.split(\",\") if domain.strip()]\n\n if self.exclude_domains:\n exclude_domains = [domain.strip() for domain in self.exclude_domains.split(\",\") if domain.strip()]\n\n url = \"https://api.tavily.com/search\"\n headers = 
{\n \"content-type\": \"application/json\",\n \"accept\": \"application/json\",\n }\n\n payload = {\n \"api_key\": self.api_key,\n \"query\": self.query,\n \"search_depth\": self.search_depth,\n \"topic\": self.topic,\n \"max_results\": self.max_results,\n \"include_images\": self.include_images,\n \"include_answer\": self.include_answer,\n \"include_raw_content\": self.include_raw_content,\n \"days\": self.days,\n \"time_range\": self.time_range,\n }\n\n # Only add domains to payload if they exist and have values\n if include_domains:\n payload[\"include_domains\"] = include_domains\n if exclude_domains:\n payload[\"exclude_domains\"] = exclude_domains\n\n # Add conditional parameters only if they should be included\n if self.search_depth == \"advanced\" and self.chunks_per_source:\n payload[\"chunks_per_source\"] = self.chunks_per_source\n\n if self.topic == \"news\" and self.days:\n payload[\"days\"] = int(self.days) # Ensure days is an integer\n\n # Add time_range if it's set\n if hasattr(self, \"time_range\") and self.time_range:\n payload[\"time_range\"] = self.time_range\n\n # Add timeout handling\n with httpx.Client(timeout=90.0) as client:\n response = client.post(url, json=payload, headers=headers)\n\n response.raise_for_status()\n search_results = response.json()\n\n data_results = []\n\n if self.include_answer and search_results.get(\"answer\"):\n data_results.append(Data(text=search_results[\"answer\"]))\n\n for result in search_results.get(\"results\", []):\n content = result.get(\"content\", \"\")\n result_data = {\n \"title\": result.get(\"title\"),\n \"url\": result.get(\"url\"),\n \"content\": content,\n \"score\": result.get(\"score\"),\n }\n if self.include_raw_content:\n result_data[\"raw_content\"] = result.get(\"raw_content\")\n\n data_results.append(Data(text=content, data=result_data))\n\n if self.include_images and search_results.get(\"images\"):\n data_results.append(Data(text=\"Images found\", data={\"images\": search_results[\"images\"]}))\n\n except httpx.TimeoutException:\n error_message = \"Request timed out (90s). Please try again or adjust parameters.\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except httpx.HTTPStatusError as exc:\n error_message = f\"HTTP error occurred: {exc.response.status_code} - {exc.response.text}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except httpx.RequestError as exc:\n error_message = f\"Request error occurred: {exc}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except ValueError as exc:\n error_message = f\"Invalid response format: {exc}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n else:\n self.status = data_results\n return data_results\n\n def fetch_content_dataframe(self) -> DataFrame:\n data = self.fetch_content()\n return DataFrame(data)\n"
+ "value": "import httpx\nfrom loguru import logger\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.template.field.base import Output\n\n\nclass TavilySearchComponent(Component):\n display_name = \"Tavily Search API\"\n description = \"\"\"**Tavily Search** is a search engine optimized for LLMs and RAG, \\\n aimed at efficient, quick, and persistent search results.\"\"\"\n icon = \"TavilyIcon\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Tavily API Key\",\n required=True,\n info=\"Your Tavily API Key.\",\n ),\n MessageTextInput(\n name=\"query\",\n display_name=\"Search Query\",\n info=\"The search query you want to execute with Tavily.\",\n tool_mode=True,\n ),\n DropdownInput(\n name=\"search_depth\",\n display_name=\"Search Depth\",\n info=\"The depth of the search.\",\n options=[\"basic\", \"advanced\"],\n value=\"advanced\",\n advanced=True,\n ),\n IntInput(\n name=\"chunks_per_source\",\n display_name=\"Chunks Per Source\",\n info=(\"The number of content chunks to retrieve from each source (1-3). Only works with advanced search.\"),\n value=3,\n advanced=True,\n ),\n DropdownInput(\n name=\"topic\",\n display_name=\"Search Topic\",\n info=\"The category of the search.\",\n options=[\"general\", \"news\"],\n value=\"general\",\n advanced=True,\n ),\n IntInput(\n name=\"days\",\n display_name=\"Days\",\n info=\"Number of days back from current date to include. Only available with news topic.\",\n value=7,\n advanced=True,\n ),\n IntInput(\n name=\"max_results\",\n display_name=\"Max Results\",\n info=\"The maximum number of search results to return.\",\n value=5,\n advanced=True,\n ),\n BoolInput(\n name=\"include_answer\",\n display_name=\"Include Answer\",\n info=\"Include a short answer to original query.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"time_range\",\n display_name=\"Time Range\",\n info=\"The time range back from the current date to filter results.\",\n options=[\"day\", \"week\", \"month\", \"year\"],\n value=None, # Default to None to make it optional\n advanced=True,\n ),\n BoolInput(\n name=\"include_images\",\n display_name=\"Include Images\",\n info=\"Include a list of query-related images in the response.\",\n value=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"include_domains\",\n display_name=\"Include Domains\",\n info=\"Comma-separated list of domains to include in the search results.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"exclude_domains\",\n display_name=\"Exclude Domains\",\n info=\"Comma-separated list of domains to exclude from the search results.\",\n advanced=True,\n ),\n BoolInput(\n name=\"include_raw_content\",\n display_name=\"Include Raw Content\",\n info=\"Include the cleaned and parsed HTML content of each search result.\",\n value=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"fetch_content_dataframe\"),\n ]\n\n def fetch_content(self) -> list[Data]:\n try:\n # Only process domains if they're provided\n include_domains = None\n exclude_domains = None\n\n if self.include_domains:\n include_domains = [domain.strip() for domain in self.include_domains.split(\",\") if domain.strip()]\n\n if self.exclude_domains:\n exclude_domains = [domain.strip() for domain in 
self.exclude_domains.split(\",\") if domain.strip()]\n\n url = \"https://api.tavily.com/search\"\n headers = {\n \"content-type\": \"application/json\",\n \"accept\": \"application/json\",\n }\n\n payload = {\n \"api_key\": self.api_key,\n \"query\": self.query,\n \"search_depth\": self.search_depth,\n \"topic\": self.topic,\n \"max_results\": self.max_results,\n \"include_images\": self.include_images,\n \"include_answer\": self.include_answer,\n \"include_raw_content\": self.include_raw_content,\n \"days\": self.days,\n \"time_range\": self.time_range,\n }\n\n # Only add domains to payload if they exist and have values\n if include_domains:\n payload[\"include_domains\"] = include_domains\n if exclude_domains:\n payload[\"exclude_domains\"] = exclude_domains\n\n # Add conditional parameters only if they should be included\n if self.search_depth == \"advanced\" and self.chunks_per_source:\n payload[\"chunks_per_source\"] = self.chunks_per_source\n\n if self.topic == \"news\" and self.days:\n payload[\"days\"] = int(self.days) # Ensure days is an integer\n\n # Add time_range if it's set\n if hasattr(self, \"time_range\") and self.time_range:\n payload[\"time_range\"] = self.time_range\n\n # Add timeout handling\n with httpx.Client(timeout=90.0) as client:\n response = client.post(url, json=payload, headers=headers)\n\n response.raise_for_status()\n search_results = response.json()\n\n data_results = []\n\n if self.include_answer and search_results.get(\"answer\"):\n data_results.append(Data(text=search_results[\"answer\"]))\n\n for result in search_results.get(\"results\", []):\n content = result.get(\"content\", \"\")\n result_data = {\n \"title\": result.get(\"title\"),\n \"url\": result.get(\"url\"),\n \"content\": content,\n \"score\": result.get(\"score\"),\n }\n if self.include_raw_content:\n result_data[\"raw_content\"] = result.get(\"raw_content\")\n\n data_results.append(Data(text=content, data=result_data))\n\n if self.include_images and search_results.get(\"images\"):\n data_results.append(Data(text=\"Images found\", data={\"images\": search_results[\"images\"]}))\n\n except httpx.TimeoutException:\n error_message = \"Request timed out (90s). Please try again or adjust parameters.\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except httpx.HTTPStatusError as exc:\n error_message = f\"HTTP error occurred: {exc.response.status_code} - {exc.response.text}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except httpx.RequestError as exc:\n error_message = f\"Request error occurred: {exc}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except ValueError as exc:\n error_message = f\"Invalid response format: {exc}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n else:\n self.status = data_results\n return data_results\n\n def fetch_content_dataframe(self) -> DataFrame:\n data = self.fetch_content()\n return DataFrame(data)\n"
},
"days": {
"_input_type": "IntInput",
@@ -1154,7 +1154,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "import ast\nimport pprint\nfrom enum import Enum\n\nimport yfinance as yf\nfrom langchain_core.tools import ToolException\nfrom loguru import logger\nfrom pydantic import BaseModel, Field\n\nfrom langflow.custom import Component\nfrom langflow.inputs import DropdownInput, IntInput, MessageTextInput\nfrom langflow.io import Output\nfrom langflow.schema import Data, DataFrame\n\n\nclass YahooFinanceMethod(Enum):\n GET_INFO = \"get_info\"\n GET_NEWS = \"get_news\"\n GET_ACTIONS = \"get_actions\"\n GET_ANALYSIS = \"get_analysis\"\n GET_BALANCE_SHEET = \"get_balance_sheet\"\n GET_CALENDAR = \"get_calendar\"\n GET_CASHFLOW = \"get_cashflow\"\n GET_INSTITUTIONAL_HOLDERS = \"get_institutional_holders\"\n GET_RECOMMENDATIONS = \"get_recommendations\"\n GET_SUSTAINABILITY = \"get_sustainability\"\n GET_MAJOR_HOLDERS = \"get_major_holders\"\n GET_MUTUALFUND_HOLDERS = \"get_mutualfund_holders\"\n GET_INSIDER_PURCHASES = \"get_insider_purchases\"\n GET_INSIDER_TRANSACTIONS = \"get_insider_transactions\"\n GET_INSIDER_ROSTER_HOLDERS = \"get_insider_roster_holders\"\n GET_DIVIDENDS = \"get_dividends\"\n GET_CAPITAL_GAINS = \"get_capital_gains\"\n GET_SPLITS = \"get_splits\"\n GET_SHARES = \"get_shares\"\n GET_FAST_INFO = \"get_fast_info\"\n GET_SEC_FILINGS = \"get_sec_filings\"\n GET_RECOMMENDATIONS_SUMMARY = \"get_recommendations_summary\"\n GET_UPGRADES_DOWNGRADES = \"get_upgrades_downgrades\"\n GET_EARNINGS = \"get_earnings\"\n GET_INCOME_STMT = \"get_income_stmt\"\n\n\nclass YahooFinanceSchema(BaseModel):\n symbol: str = Field(..., description=\"The stock symbol to retrieve data for.\")\n method: YahooFinanceMethod = Field(YahooFinanceMethod.GET_INFO, description=\"The type of data to retrieve.\")\n num_news: int | None = Field(5, description=\"The number of news articles to retrieve.\")\n\n\nclass YfinanceComponent(Component):\n display_name = \"Yahoo Finance\"\n description = \"\"\"Uses [yfinance](https://pypi.org/project/yfinance/) (unofficial package) \\\nto access financial data and market information from Yahoo Finance.\"\"\"\n icon = \"trending-up\"\n\n inputs = [\n MessageTextInput(\n name=\"symbol\",\n display_name=\"Stock Symbol\",\n info=\"The stock symbol to retrieve data for (e.g., AAPL, GOOG).\",\n tool_mode=True,\n ),\n DropdownInput(\n name=\"method\",\n display_name=\"Data Method\",\n info=\"The type of data to retrieve.\",\n options=list(YahooFinanceMethod),\n value=\"get_news\",\n ),\n IntInput(\n name=\"num_news\",\n display_name=\"Number of News\",\n info=\"The number of news articles to retrieve (only applicable for get_news).\",\n value=5,\n ),\n ]\n\n outputs = [\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"fetch_content_dataframe\"),\n ]\n\n def run_model(self) -> DataFrame:\n return self.fetch_content_dataframe()\n\n def _fetch_yfinance_data(self, ticker: yf.Ticker, method: YahooFinanceMethod, num_news: int | None) -> str:\n try:\n if method == YahooFinanceMethod.GET_INFO:\n result = ticker.info\n elif method == YahooFinanceMethod.GET_NEWS:\n result = ticker.news[:num_news]\n else:\n result = getattr(ticker, method.value)()\n return pprint.pformat(result)\n except Exception as e:\n error_message = f\"Error retrieving data: {e}\"\n logger.debug(error_message)\n self.status = error_message\n raise ToolException(error_message) from e\n\n def fetch_content(self) -> list[Data]:\n try:\n return self._yahoo_finance_tool(\n self.symbol,\n YahooFinanceMethod(self.method),\n self.num_news,\n )\n except ToolException:\n raise\n except Exception as e:\n 
error_message = f\"Unexpected error: {e}\"\n logger.debug(error_message)\n self.status = error_message\n raise ToolException(error_message) from e\n\n def _yahoo_finance_tool(\n self,\n symbol: str,\n method: YahooFinanceMethod,\n num_news: int | None = 5,\n ) -> list[Data]:\n ticker = yf.Ticker(symbol)\n result = self._fetch_yfinance_data(ticker, method, num_news)\n\n if method == YahooFinanceMethod.GET_NEWS:\n data_list = [\n Data(text=f\"{article['title']}: {article['link']}\", data=article)\n for article in ast.literal_eval(result)\n ]\n else:\n data_list = [Data(text=result, data={\"result\": result})]\n\n return data_list\n\n def fetch_content_dataframe(self) -> DataFrame:\n data = self.fetch_content()\n return DataFrame(data)\n"
+ "value": "import ast\nimport pprint\nfrom enum import Enum\n\nimport yfinance as yf\nfrom langchain_core.tools import ToolException\nfrom loguru import logger\nfrom pydantic import BaseModel, Field\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DropdownInput, IntInput, MessageTextInput\nfrom langflow.io import Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\n\n\nclass YahooFinanceMethod(Enum):\n GET_INFO = \"get_info\"\n GET_NEWS = \"get_news\"\n GET_ACTIONS = \"get_actions\"\n GET_ANALYSIS = \"get_analysis\"\n GET_BALANCE_SHEET = \"get_balance_sheet\"\n GET_CALENDAR = \"get_calendar\"\n GET_CASHFLOW = \"get_cashflow\"\n GET_INSTITUTIONAL_HOLDERS = \"get_institutional_holders\"\n GET_RECOMMENDATIONS = \"get_recommendations\"\n GET_SUSTAINABILITY = \"get_sustainability\"\n GET_MAJOR_HOLDERS = \"get_major_holders\"\n GET_MUTUALFUND_HOLDERS = \"get_mutualfund_holders\"\n GET_INSIDER_PURCHASES = \"get_insider_purchases\"\n GET_INSIDER_TRANSACTIONS = \"get_insider_transactions\"\n GET_INSIDER_ROSTER_HOLDERS = \"get_insider_roster_holders\"\n GET_DIVIDENDS = \"get_dividends\"\n GET_CAPITAL_GAINS = \"get_capital_gains\"\n GET_SPLITS = \"get_splits\"\n GET_SHARES = \"get_shares\"\n GET_FAST_INFO = \"get_fast_info\"\n GET_SEC_FILINGS = \"get_sec_filings\"\n GET_RECOMMENDATIONS_SUMMARY = \"get_recommendations_summary\"\n GET_UPGRADES_DOWNGRADES = \"get_upgrades_downgrades\"\n GET_EARNINGS = \"get_earnings\"\n GET_INCOME_STMT = \"get_income_stmt\"\n\n\nclass YahooFinanceSchema(BaseModel):\n symbol: str = Field(..., description=\"The stock symbol to retrieve data for.\")\n method: YahooFinanceMethod = Field(YahooFinanceMethod.GET_INFO, description=\"The type of data to retrieve.\")\n num_news: int | None = Field(5, description=\"The number of news articles to retrieve.\")\n\n\nclass YfinanceComponent(Component):\n display_name = \"Yahoo Finance\"\n description = \"\"\"Uses [yfinance](https://pypi.org/project/yfinance/) (unofficial package) \\\nto access financial data and market information from Yahoo Finance.\"\"\"\n icon = \"trending-up\"\n\n inputs = [\n MessageTextInput(\n name=\"symbol\",\n display_name=\"Stock Symbol\",\n info=\"The stock symbol to retrieve data for (e.g., AAPL, GOOG).\",\n tool_mode=True,\n ),\n DropdownInput(\n name=\"method\",\n display_name=\"Data Method\",\n info=\"The type of data to retrieve.\",\n options=list(YahooFinanceMethod),\n value=\"get_news\",\n ),\n IntInput(\n name=\"num_news\",\n display_name=\"Number of News\",\n info=\"The number of news articles to retrieve (only applicable for get_news).\",\n value=5,\n ),\n ]\n\n outputs = [\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"fetch_content_dataframe\"),\n ]\n\n def run_model(self) -> DataFrame:\n return self.fetch_content_dataframe()\n\n def _fetch_yfinance_data(self, ticker: yf.Ticker, method: YahooFinanceMethod, num_news: int | None) -> str:\n try:\n if method == YahooFinanceMethod.GET_INFO:\n result = ticker.info\n elif method == YahooFinanceMethod.GET_NEWS:\n result = ticker.news[:num_news]\n else:\n result = getattr(ticker, method.value)()\n return pprint.pformat(result)\n except Exception as e:\n error_message = f\"Error retrieving data: {e}\"\n logger.debug(error_message)\n self.status = error_message\n raise ToolException(error_message) from e\n\n def fetch_content(self) -> list[Data]:\n try:\n return self._yahoo_finance_tool(\n self.symbol,\n YahooFinanceMethod(self.method),\n 
self.num_news,\n )\n except ToolException:\n raise\n except Exception as e:\n error_message = f\"Unexpected error: {e}\"\n logger.debug(error_message)\n self.status = error_message\n raise ToolException(error_message) from e\n\n def _yahoo_finance_tool(\n self,\n symbol: str,\n method: YahooFinanceMethod,\n num_news: int | None = 5,\n ) -> list[Data]:\n ticker = yf.Ticker(symbol)\n result = self._fetch_yfinance_data(ticker, method, num_news)\n\n if method == YahooFinanceMethod.GET_NEWS:\n data_list = [\n Data(text=f\"{article['title']}: {article['link']}\", data=article)\n for article in ast.literal_eval(result)\n ]\n else:\n data_list = [Data(text=result, data={\"result\": result})]\n\n return data_list\n\n def fetch_content_dataframe(self) -> DataFrame:\n data = self.fetch_content()\n return DataFrame(data)\n"
},
"method": {
"_input_type": "DropdownInput",
@@ -1468,7 +1468,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
@@ -2462,7 +2462,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
+ "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers.current_date import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
},
"handle_parsing_errors": {
"_input_type": "BoolInput",
@@ -3210,7 +3210,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
+ "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers.current_date import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
},
"handle_parsing_errors": {
"_input_type": "BoolInput",
@@ -3822,7 +3822,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"first_text": {
"advanced": false,
@@ -3999,7 +3999,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"first_text": {
"advanced": false,
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Financial Report Parser.json b/src/backend/base/langflow/initial_setup/starter_projects/Financial Report Parser.json
index 8fc4c2e24..f2bbce5d4 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Financial Report Parser.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Financial Report Parser.json
@@ -231,7 +231,7 @@
"show": true,
"title_case": false,
"type": "code",
-        "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n    OPENAI_MODEL_NAMES,\n    OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n    display_name = \"OpenAI\"\n    description = \"Generates text using OpenAI LLMs.\"\n    icon = \"OpenAI\"\n    name = \"OpenAIModel\"\n\n    inputs = [\n        *LCModelComponent._base_inputs,\n        IntInput(\n            name=\"max_tokens\",\n            display_name=\"Max Tokens\",\n            advanced=True,\n            info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n            range_spec=RangeSpec(min=0, max=128000),\n        ),\n        DictInput(\n            name=\"model_kwargs\",\n            display_name=\"Model Kwargs\",\n            advanced=True,\n            info=\"Additional keyword arguments to pass to the model.\",\n        ),\n        BoolInput(\n            name=\"json_mode\",\n            display_name=\"JSON Mode\",\n            advanced=True,\n            info=\"If True, it will output JSON regardless of passing a schema.\",\n        ),\n        DropdownInput(\n            name=\"model_name\",\n            display_name=\"Model Name\",\n            advanced=False,\n            options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n            value=OPENAI_MODEL_NAMES[1],\n            combobox=True,\n            real_time_refresh=True,\n        ),\n        StrInput(\n            name=\"openai_api_base\",\n            display_name=\"OpenAI API Base\",\n            advanced=True,\n            info=\"The base URL of the OpenAI API. \"\n            \"Defaults to https://api.openai.com/v1. \"\n            \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n        ),\n        SecretStrInput(\n            name=\"api_key\",\n            display_name=\"OpenAI API Key\",\n            info=\"The OpenAI API Key to use for the OpenAI model.\",\n            advanced=False,\n            value=\"OPENAI_API_KEY\",\n            required=True,\n        ),\n        SliderInput(\n            name=\"temperature\",\n            display_name=\"Temperature\",\n            value=0.1,\n            range_spec=RangeSpec(min=0, max=1, step=0.01),\n            show=True,\n        ),\n        IntInput(\n            name=\"seed\",\n            display_name=\"Seed\",\n            info=\"The seed controls the reproducibility of the job.\",\n            advanced=True,\n            value=1,\n        ),\n        IntInput(\n            name=\"max_retries\",\n            display_name=\"Max Retries\",\n            info=\"The maximum number of retries to make when generating.\",\n            advanced=True,\n            value=5,\n        ),\n        IntInput(\n            name=\"timeout\",\n            display_name=\"Timeout\",\n            info=\"The timeout for requests to OpenAI completion API.\",\n            advanced=True,\n            value=700,\n        ),\n    ]\n\n    def build_model(self) -> LanguageModel:  # type: ignore[type-var]\n        parameters = {\n            \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n            \"model_name\": self.model_name,\n            \"max_tokens\": self.max_tokens or None,\n            \"model_kwargs\": self.model_kwargs or {},\n            \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n            \"seed\": self.seed,\n            \"max_retries\": self.max_retries,\n            \"timeout\": self.timeout,\n            \"temperature\": self.temperature if self.temperature is not None else 0.1,\n        }\n\n        logger.info(f\"Model name: {self.model_name}\")\n        if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n            logger.info(\"Getting reasoning model parameters\")\n            parameters.pop(\"temperature\")\n            parameters.pop(\"seed\")\n        output = ChatOpenAI(**parameters)\n        if self.json_mode:\n            output = output.bind(response_format={\"type\": \"json_object\"})\n\n        return output\n\n    def _get_exception_message(self, e: Exception):\n        \"\"\"Get a message from an OpenAI exception.\n\n        Args:\n            e (Exception): The exception to get the message from.\n\n        Returns:\n            str: The message from the exception.\n        \"\"\"\n        try:\n            from openai import BadRequestError\n        except ImportError:\n            return None\n        if isinstance(e, BadRequestError):\n            message = e.body.get(\"message\")\n            if message:\n                return message\n        return None\n\n    def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n        if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n            build_config[\"temperature\"][\"show\"] = False\n            build_config[\"seed\"][\"show\"] = False\n        if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n            build_config[\"temperature\"][\"show\"] = True\n            build_config[\"seed\"][\"show\"] = True\n        return build_config\n"
+        "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n    OPENAI_MODEL_NAMES,\n    OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n    display_name = \"OpenAI\"\n    description = \"Generates text using OpenAI LLMs.\"\n    icon = \"OpenAI\"\n    name = \"OpenAIModel\"\n\n    inputs = [\n        *LCModelComponent._base_inputs,\n        IntInput(\n            name=\"max_tokens\",\n            display_name=\"Max Tokens\",\n            advanced=True,\n            info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n            range_spec=RangeSpec(min=0, max=128000),\n        ),\n        DictInput(\n            name=\"model_kwargs\",\n            display_name=\"Model Kwargs\",\n            advanced=True,\n            info=\"Additional keyword arguments to pass to the model.\",\n        ),\n        BoolInput(\n            name=\"json_mode\",\n            display_name=\"JSON Mode\",\n            advanced=True,\n            info=\"If True, it will output JSON regardless of passing a schema.\",\n        ),\n        DropdownInput(\n            name=\"model_name\",\n            display_name=\"Model Name\",\n            advanced=False,\n            options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n            value=OPENAI_MODEL_NAMES[1],\n            combobox=True,\n            real_time_refresh=True,\n        ),\n        StrInput(\n            name=\"openai_api_base\",\n            display_name=\"OpenAI API Base\",\n            advanced=True,\n            info=\"The base URL of the OpenAI API. \"\n            \"Defaults to https://api.openai.com/v1. \"\n            \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n        ),\n        SecretStrInput(\n            name=\"api_key\",\n            display_name=\"OpenAI API Key\",\n            info=\"The OpenAI API Key to use for the OpenAI model.\",\n            advanced=False,\n            value=\"OPENAI_API_KEY\",\n            required=True,\n        ),\n        SliderInput(\n            name=\"temperature\",\n            display_name=\"Temperature\",\n            value=0.1,\n            range_spec=RangeSpec(min=0, max=1, step=0.01),\n            show=True,\n        ),\n        IntInput(\n            name=\"seed\",\n            display_name=\"Seed\",\n            info=\"The seed controls the reproducibility of the job.\",\n            advanced=True,\n            value=1,\n        ),\n        IntInput(\n            name=\"max_retries\",\n            display_name=\"Max Retries\",\n            info=\"The maximum number of retries to make when generating.\",\n            advanced=True,\n            value=5,\n        ),\n        IntInput(\n            name=\"timeout\",\n            display_name=\"Timeout\",\n            info=\"The timeout for requests to OpenAI completion API.\",\n            advanced=True,\n            value=700,\n        ),\n    ]\n\n    def build_model(self) -> LanguageModel:  # type: ignore[type-var]\n        parameters = {\n            \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n            \"model_name\": self.model_name,\n            \"max_tokens\": self.max_tokens or None,\n            \"model_kwargs\": self.model_kwargs or {},\n            \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n            \"seed\": self.seed,\n            \"max_retries\": self.max_retries,\n            \"timeout\": self.timeout,\n            \"temperature\": self.temperature if self.temperature is not None else 0.1,\n        }\n\n        logger.info(f\"Model name: {self.model_name}\")\n        if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n            logger.info(\"Getting reasoning model parameters\")\n            parameters.pop(\"temperature\")\n            parameters.pop(\"seed\")\n        output = ChatOpenAI(**parameters)\n        if self.json_mode:\n            output = output.bind(response_format={\"type\": \"json_object\"})\n\n        return output\n\n    def _get_exception_message(self, e: Exception):\n        \"\"\"Get a message from an OpenAI exception.\n\n        Args:\n            e (Exception): The exception to get the message from.\n\n        Returns:\n            str: The message from the exception.\n        \"\"\"\n        try:\n            from openai import BadRequestError\n        except ImportError:\n            return None\n        if isinstance(e, BadRequestError):\n            message = e.body.get(\"message\")\n            if message:\n                return message\n        return None\n\n    def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n        if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n            build_config[\"temperature\"][\"show\"] = False\n            build_config[\"seed\"][\"show\"] = False\n        if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n            build_config[\"temperature\"][\"show\"] = True\n            build_config[\"seed\"][\"show\"] = True\n        return build_config\n"
},
"input_value": {
"_input_type": "MessageInput",
@@ -591,7 +591,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.custom import Component\nfrom langflow.helpers.data import data_to_text, data_to_text_list\nfrom langflow.io import DataInput, MultilineInput, Output, StrInput\nfrom langflow.schema import Data\nfrom langflow.schema.message import Message\n\n\nclass ParseDataComponent(Component):\n display_name = \"Data to Message\"\n description = \"Convert Data objects into Messages using any {field_name} from input data.\"\n icon = \"message-square\"\n name = \"ParseData\"\n legacy = True\n metadata = {\n \"legacy_name\": \"Parse Data\",\n }\n\n inputs = [\n DataInput(\n name=\"data\",\n display_name=\"Data\",\n info=\"The data to convert to text.\",\n is_list=True,\n required=True,\n ),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. \"\n \"It can contain the keys {text}, {data} or any other key in the Data.\",\n value=\"{text}\",\n required=True,\n ),\n StrInput(name=\"sep\", display_name=\"Separator\", advanced=True, value=\"\\n\"),\n ]\n\n outputs = [\n Output(\n display_name=\"Message\",\n name=\"text\",\n info=\"Data as a single Message, with each input Data separated by Separator\",\n method=\"parse_data\",\n ),\n Output(\n display_name=\"Data List\",\n name=\"data_list\",\n info=\"Data as a list of new Data, each having `text` formatted by Template\",\n method=\"parse_data_as_list\",\n ),\n ]\n\n def _clean_args(self) -> tuple[list[Data], str, str]:\n data = self.data if isinstance(self.data, list) else [self.data]\n template = self.template\n sep = self.sep\n return data, template, sep\n\n def parse_data(self) -> Message:\n data, template, sep = self._clean_args()\n result_string = data_to_text(template, data, sep)\n self.status = result_string\n return Message(text=result_string)\n\n def parse_data_as_list(self) -> list[Data]:\n data, template, _ = self._clean_args()\n text_list, data_list = data_to_text_list(template, data)\n for item, text in zip(data_list, text_list, strict=True):\n item.set_text(text)\n self.status = data_list\n return data_list\n"
+ "value": "from langflow.custom.custom_component.component import Component\nfrom langflow.helpers.data import data_to_text, data_to_text_list\nfrom langflow.io import DataInput, MultilineInput, Output, StrInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.message import Message\n\n\nclass ParseDataComponent(Component):\n display_name = \"Data to Message\"\n description = \"Convert Data objects into Messages using any {field_name} from input data.\"\n icon = \"message-square\"\n name = \"ParseData\"\n legacy = True\n metadata = {\n \"legacy_name\": \"Parse Data\",\n }\n\n inputs = [\n DataInput(\n name=\"data\",\n display_name=\"Data\",\n info=\"The data to convert to text.\",\n is_list=True,\n required=True,\n ),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. \"\n \"It can contain the keys {text}, {data} or any other key in the Data.\",\n value=\"{text}\",\n required=True,\n ),\n StrInput(name=\"sep\", display_name=\"Separator\", advanced=True, value=\"\\n\"),\n ]\n\n outputs = [\n Output(\n display_name=\"Message\",\n name=\"text\",\n info=\"Data as a single Message, with each input Data separated by Separator\",\n method=\"parse_data\",\n ),\n Output(\n display_name=\"Data List\",\n name=\"data_list\",\n info=\"Data as a list of new Data, each having `text` formatted by Template\",\n method=\"parse_data_as_list\",\n ),\n ]\n\n def _clean_args(self) -> tuple[list[Data], str, str]:\n data = self.data if isinstance(self.data, list) else [self.data]\n template = self.template\n sep = self.sep\n return data, template, sep\n\n def parse_data(self) -> Message:\n data, template, sep = self._clean_args()\n result_string = data_to_text(template, data, sep)\n self.status = result_string\n return Message(text=result_string)\n\n def parse_data_as_list(self) -> list[Data]:\n data, template, _ = self._clean_args()\n text_list, data_list = data_to_text_list(template, data)\n for item, text in zip(data_list, text_list, strict=True):\n item.set_text(text)\n self.status = data_list\n return data_list\n"
},
"data": {
"_input_type": "DataInput",
@@ -811,7 +811,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
@@ -1105,7 +1105,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
+ "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"_input_type": "FileInput",
@@ -1433,7 +1433,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from pydantic import BaseModel, Field, create_model\nfrom trustcall import create_extractor\n\nfrom langflow.base.models.chat_result import get_chat_result\nfrom langflow.custom import Component\nfrom langflow.helpers.base_model import build_model_from_schema\nfrom langflow.io import (\n BoolInput,\n HandleInput,\n MessageTextInput,\n MultilineInput,\n Output,\n TableInput,\n)\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.table import EditMode\n\n\nclass StructuredOutputComponent(Component):\n display_name = \"Structured Output\"\n description = (\n \"Transforms LLM responses into **structured data formats**. Ideal for extracting specific information \"\n \"or creating consistent outputs.\"\n )\n name = \"StructuredOutput\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"llm\",\n display_name=\"Language Model\",\n info=\"The language model to use to generate the structured output.\",\n input_types=[\"LanguageModel\"],\n required=True,\n ),\n MessageTextInput(\n name=\"input_value\",\n display_name=\"Input Message\",\n info=\"The input message to the language model.\",\n tool_mode=True,\n required=True,\n ),\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Format Instructions\",\n info=\"The instructions to the language model for formatting the output.\",\n value=(\n \"You are an AI system designed to extract structured information from unstructured text.\"\n \"Given the input_text, return a JSON object with predefined keys based on the expected structure.\"\n \"Extract values accurately and format them according to the specified type \"\n \"(e.g., string, integer, float, date).\"\n \"If a value is missing or cannot be determined, return a default \"\n \"(e.g., null, 0, or 'N/A').\"\n \"If multiple instances of the expected structure exist within the input_text, \"\n \"stream each as a separate JSON object.\"\n ),\n required=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"schema_name\",\n display_name=\"Schema Name\",\n info=\"Provide a name for the output data schema.\",\n advanced=True,\n ),\n TableInput(\n name=\"output_schema\",\n display_name=\"Output Schema\",\n info=\"Define the structure and data types for the model's output.\",\n required=True,\n # TODO: remove deault value\n table_schema=[\n {\n \"name\": \"name\",\n \"display_name\": \"Name\",\n \"type\": \"str\",\n \"description\": \"Specify the name of the output field.\",\n \"default\": \"field\",\n \"edit_mode\": EditMode.INLINE,\n },\n {\n \"name\": \"description\",\n \"display_name\": \"Description\",\n \"type\": \"str\",\n \"description\": \"Describe the purpose of the output field.\",\n \"default\": \"description of field\",\n \"edit_mode\": EditMode.POPOVER,\n },\n {\n \"name\": \"type\",\n \"display_name\": \"Type\",\n \"type\": \"str\",\n \"edit_mode\": EditMode.INLINE,\n \"description\": (\"Indicate the data type of the output field (e.g., str, int, float, bool, dict).\"),\n \"options\": [\"str\", \"int\", \"float\", \"bool\", \"dict\"],\n \"default\": \"str\",\n },\n ],\n value=[\n {\n \"name\": \"field\",\n \"description\": \"description of field\",\n \"type\": \"str\",\n \"multiple\": \"False\",\n }\n ],\n ),\n BoolInput(\n name=\"multiple\",\n advanced=True,\n display_name=\"Generate Multiple\",\n info=\"[Deprecated] Always set to True\",\n value=True,\n ),\n ]\n\n outputs = [\n Output(\n name=\"structured_output\",\n display_name=\"Structured Output\",\n method=\"build_structured_output\",\n ),\n Output(\n 
name=\"structured_output_dataframe\",\n display_name=\"DataFrame\",\n method=\"as_dataframe\",\n ),\n ]\n\n def build_structured_output_base(self) -> Data:\n schema_name = self.schema_name or \"OutputModel\"\n\n if not hasattr(self.llm, \"with_structured_output\"):\n msg = \"Language model does not support structured output.\"\n raise TypeError(msg)\n if not self.output_schema:\n msg = \"Output schema cannot be empty\"\n raise ValueError(msg)\n\n output_model_ = build_model_from_schema(self.output_schema)\n\n output_model = create_model(\n schema_name,\n __doc__=f\"A list of {schema_name}.\",\n objects=(list[output_model_], Field(description=f\"A list of {schema_name}.\")), # type: ignore[valid-type]\n )\n\n try:\n llm_with_structured_output = create_extractor(self.llm, tools=[output_model])\n except NotImplementedError as exc:\n msg = f\"{self.llm.__class__.__name__} does not support structured output.\"\n raise TypeError(msg) from exc\n config_dict = {\n \"run_name\": self.display_name,\n \"project_name\": self.get_project_name(),\n \"callbacks\": self.get_langchain_callbacks(),\n }\n result = get_chat_result(\n runnable=llm_with_structured_output,\n system_message=self.system_prompt,\n input_value=self.input_value,\n config=config_dict,\n )\n if isinstance(result, BaseModel):\n result = result.model_dump()\n if responses := result.get(\"responses\"):\n result = responses[0].model_dump()\n if result and \"objects\" in result:\n return result[\"objects\"]\n\n return result\n\n def build_structured_output(self) -> Data:\n output = self.build_structured_output_base()\n\n return Data(text_key=\"results\", data={\"results\": output})\n\n def as_dataframe(self) -> DataFrame:\n output = self.build_structured_output_base()\n if isinstance(output, list):\n return DataFrame(data=output)\n return DataFrame(data=[output])\n"
+ "value": "from pydantic import BaseModel, Field, create_model\nfrom trustcall import create_extractor\n\nfrom langflow.base.models.chat_result import get_chat_result\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.helpers.base_model import build_model_from_schema\nfrom langflow.io import (\n BoolInput,\n HandleInput,\n MessageTextInput,\n MultilineInput,\n Output,\n TableInput,\n)\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.table import EditMode\n\n\nclass StructuredOutputComponent(Component):\n display_name = \"Structured Output\"\n description = (\n \"Transforms LLM responses into **structured data formats**. Ideal for extracting specific information \"\n \"or creating consistent outputs.\"\n )\n name = \"StructuredOutput\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"llm\",\n display_name=\"Language Model\",\n info=\"The language model to use to generate the structured output.\",\n input_types=[\"LanguageModel\"],\n required=True,\n ),\n MessageTextInput(\n name=\"input_value\",\n display_name=\"Input Message\",\n info=\"The input message to the language model.\",\n tool_mode=True,\n required=True,\n ),\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Format Instructions\",\n info=\"The instructions to the language model for formatting the output.\",\n value=(\n \"You are an AI system designed to extract structured information from unstructured text.\"\n \"Given the input_text, return a JSON object with predefined keys based on the expected structure.\"\n \"Extract values accurately and format them according to the specified type \"\n \"(e.g., string, integer, float, date).\"\n \"If a value is missing or cannot be determined, return a default \"\n \"(e.g., null, 0, or 'N/A').\"\n \"If multiple instances of the expected structure exist within the input_text, \"\n \"stream each as a separate JSON object.\"\n ),\n required=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"schema_name\",\n display_name=\"Schema Name\",\n info=\"Provide a name for the output data schema.\",\n advanced=True,\n ),\n TableInput(\n name=\"output_schema\",\n display_name=\"Output Schema\",\n info=\"Define the structure and data types for the model's output.\",\n required=True,\n # TODO: remove deault value\n table_schema=[\n {\n \"name\": \"name\",\n \"display_name\": \"Name\",\n \"type\": \"str\",\n \"description\": \"Specify the name of the output field.\",\n \"default\": \"field\",\n \"edit_mode\": EditMode.INLINE,\n },\n {\n \"name\": \"description\",\n \"display_name\": \"Description\",\n \"type\": \"str\",\n \"description\": \"Describe the purpose of the output field.\",\n \"default\": \"description of field\",\n \"edit_mode\": EditMode.POPOVER,\n },\n {\n \"name\": \"type\",\n \"display_name\": \"Type\",\n \"type\": \"str\",\n \"edit_mode\": EditMode.INLINE,\n \"description\": (\"Indicate the data type of the output field (e.g., str, int, float, bool, dict).\"),\n \"options\": [\"str\", \"int\", \"float\", \"bool\", \"dict\"],\n \"default\": \"str\",\n },\n ],\n value=[\n {\n \"name\": \"field\",\n \"description\": \"description of field\",\n \"type\": \"str\",\n \"multiple\": \"False\",\n }\n ],\n ),\n BoolInput(\n name=\"multiple\",\n advanced=True,\n display_name=\"Generate Multiple\",\n info=\"[Deprecated] Always set to True\",\n value=True,\n ),\n ]\n\n outputs = [\n Output(\n name=\"structured_output\",\n display_name=\"Structured Output\",\n 
method=\"build_structured_output\",\n ),\n Output(\n name=\"structured_output_dataframe\",\n display_name=\"DataFrame\",\n method=\"as_dataframe\",\n ),\n ]\n\n def build_structured_output_base(self) -> Data:\n schema_name = self.schema_name or \"OutputModel\"\n\n if not hasattr(self.llm, \"with_structured_output\"):\n msg = \"Language model does not support structured output.\"\n raise TypeError(msg)\n if not self.output_schema:\n msg = \"Output schema cannot be empty\"\n raise ValueError(msg)\n\n output_model_ = build_model_from_schema(self.output_schema)\n\n output_model = create_model(\n schema_name,\n __doc__=f\"A list of {schema_name}.\",\n objects=(list[output_model_], Field(description=f\"A list of {schema_name}.\")), # type: ignore[valid-type]\n )\n\n try:\n llm_with_structured_output = create_extractor(self.llm, tools=[output_model])\n except NotImplementedError as exc:\n msg = f\"{self.llm.__class__.__name__} does not support structured output.\"\n raise TypeError(msg) from exc\n config_dict = {\n \"run_name\": self.display_name,\n \"project_name\": self.get_project_name(),\n \"callbacks\": self.get_langchain_callbacks(),\n }\n result = get_chat_result(\n runnable=llm_with_structured_output,\n system_message=self.system_prompt,\n input_value=self.input_value,\n config=config_dict,\n )\n if isinstance(result, BaseModel):\n result = result.model_dump()\n if responses := result.get(\"responses\"):\n result = responses[0].model_dump()\n if result and \"objects\" in result:\n return result[\"objects\"]\n\n return result\n\n def build_structured_output(self) -> Data:\n output = self.build_structured_output_base()\n\n return Data(text_key=\"results\", data={\"results\": output})\n\n def as_dataframe(self) -> DataFrame:\n output = self.build_structured_output_base()\n if isinstance(output, list):\n return DataFrame(data=output)\n return DataFrame(data=[output])\n"
},
"input_value": {
"_input_type": "MessageTextInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Gmail Agent.json b/src/backend/base/langflow/initial_setup/starter_projects/Gmail Agent.json
index e0a9febd9..74faf71ca 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Gmail Agent.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Gmail Agent.json
@@ -267,7 +267,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
+ "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers.current_date import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
},
"handle_parsing_errors": {
"_input_type": "BoolInput",
@@ -918,7 +918,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
+ "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"_input_type": "FileInput",
@@ -1249,7 +1249,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
@@ -1728,7 +1728,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "import json\nfrom typing import Any\n\nfrom composio import Action\n\nfrom langflow.base.composio.composio_base import ComposioBaseComponent\nfrom langflow.inputs import (\n BoolInput,\n FileInput,\n IntInput,\n MessageTextInput,\n)\nfrom langflow.logging import logger\n\n\nclass ComposioGmailAPIComponent(ComposioBaseComponent):\n \"\"\"Gmail API component for interacting with Gmail services.\"\"\"\n\n display_name: str = \"Gmail\"\n name = \"GmailAPI\"\n icon = \"Google\"\n documentation: str = \"https://docs.composio.dev\"\n app_name = \"gmail\"\n\n # Gmail-specific actions\n _actions_data: dict = {\n \"GMAIL_SEND_EMAIL\": {\n \"display_name\": \"Send Email\",\n \"action_fields\": [\n \"recipient_email\",\n \"subject\",\n \"body\",\n \"cc\",\n \"bcc\",\n \"is_html\",\n \"gmail_user_id\",\n \"attachment\",\n ],\n },\n \"GMAIL_FETCH_EMAILS\": {\n \"display_name\": \"Fetch Emails\",\n \"action_fields\": [\n \"gmail_user_id\",\n \"max_results\",\n \"query\",\n \"page_token\",\n \"label_ids\",\n \"include_spam_trash\",\n ],\n \"get_result_field\": True,\n \"result_field\": \"messages\",\n },\n \"GMAIL_GET_PROFILE\": {\n \"display_name\": \"Get User Profile\",\n \"action_fields\": [\"gmail_user_id\"],\n },\n \"GMAIL_FETCH_MESSAGE_BY_MESSAGE_ID\": {\n \"display_name\": \"Get Email By ID\",\n \"action_fields\": [\"message_id\", \"gmail_user_id\", \"format\"],\n \"get_result_field\": False,\n },\n \"GMAIL_CREATE_EMAIL_DRAFT\": {\n \"display_name\": \"Create Draft Email\",\n \"action_fields\": [\n \"recipient_email\",\n \"subject\",\n \"body\",\n \"cc\",\n \"bcc\",\n \"is_html\",\n \"attachment\",\n \"gmail_user_id\",\n ],\n },\n \"GMAIL_FETCH_MESSAGE_BY_THREAD_ID\": {\n \"display_name\": \"Get Message By Thread ID\",\n \"action_fields\": [\"thread_id\", \"page_token\", \"gmail_user_id\"],\n \"get_result_field\": False,\n },\n \"GMAIL_LIST_THREADS\": {\n \"display_name\": \"List Email Threads\",\n \"action_fields\": [\"max_results\", \"query\", \"gmail_user_id\", \"page_token\"],\n \"get_result_field\": True,\n \"result_field\": \"threads\",\n },\n \"GMAIL_REPLY_TO_THREAD\": {\n \"display_name\": \"Reply To Thread\",\n \"action_fields\": [\"thread_id\", \"message_body\", \"recipient_email\", \"gmail_user_id\", \"cc\", \"bcc\", \"is_html\"],\n },\n \"GMAIL_LIST_LABELS\": {\n \"display_name\": \"List Email Labels\",\n \"action_fields\": [\"gmail_user_id\"],\n \"get_result_field\": True,\n \"result_field\": \"labels\",\n },\n \"GMAIL_CREATE_LABEL\": {\n \"display_name\": \"Create Email Label\",\n \"action_fields\": [\"label_name\", \"label_list_visibility\", \"message_list_visibility\", \"gmail_user_id\"],\n },\n \"GMAIL_GET_PEOPLE\": {\n \"display_name\": \"Get Contacts\",\n \"action_fields\": [\"resource_name\", \"person_fields\"],\n \"get_result_field\": True,\n \"result_field\": \"people_data\",\n },\n \"GMAIL_REMOVE_LABEL\": {\n \"display_name\": \"Delete Email Label\",\n \"action_fields\": [\"label_id\", \"gmail_user_id\"],\n \"get_result_field\": False,\n },\n \"GMAIL_GET_ATTACHMENT\": {\n \"display_name\": \"Get Attachment\",\n \"action_fields\": [\"message_id\", \"attachment_id\", \"file_name\", \"gmail_user_id\"],\n },\n }\n _all_fields = {field for action_data in _actions_data.values() for field in action_data[\"action_fields\"]}\n _bool_variables = {\"is_html\", \"include_spam_trash\"}\n\n # Combine base inputs with Gmail-specific inputs\n inputs = [\n *ComposioBaseComponent._base_inputs,\n # Email composition fields\n MessageTextInput(\n name=\"recipient_email\",\n 
display_name=\"Recipient Email\",\n info=\"Email address of the recipient\",\n show=False,\n required=True,\n advanced=False,\n ),\n MessageTextInput(\n name=\"subject\",\n display_name=\"Subject\",\n info=\"Subject of the email\",\n show=False,\n required=True,\n advanced=False,\n ),\n MessageTextInput(\n name=\"body\",\n display_name=\"Body\",\n required=True,\n info=\"Content of the email\",\n show=False,\n advanced=False,\n ),\n MessageTextInput(\n name=\"cc\",\n display_name=\"CC\",\n info=\"Email addresses to CC (Carbon Copy) in the email, separated by commas\",\n show=False,\n advanced=True,\n ),\n MessageTextInput(\n name=\"bcc\",\n display_name=\"BCC\",\n info=\"Email addresses to BCC (Blind Carbon Copy) in the email, separated by commas\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"is_html\",\n display_name=\"Is HTML\",\n info=\"Specify whether the email body contains HTML content (true/false)\",\n show=False,\n value=False,\n advanced=True,\n ),\n # Email retrieval and management fields\n MessageTextInput(\n name=\"gmail_user_id\",\n display_name=\"User ID\",\n info=\"The user's email address or 'me' for the authenticated user\",\n show=False,\n advanced=True,\n ),\n IntInput(\n name=\"max_results\",\n display_name=\"Max Results\",\n required=True,\n info=\"Maximum number of emails to be returned\",\n show=False,\n advanced=False,\n ),\n MessageTextInput(\n name=\"message_id\",\n display_name=\"Message ID\",\n info=\"The ID of the specific email message\",\n show=False,\n required=True,\n advanced=False,\n ),\n MessageTextInput(\n name=\"thread_id\",\n display_name=\"Thread ID\",\n info=\"The ID of the email thread\",\n show=False,\n required=True,\n advanced=False,\n ),\n MessageTextInput(\n name=\"query\",\n display_name=\"Query\",\n info=\"Search query to filter emails (e.g., 'from:someone@email.com' or 'subject:hello')\",\n show=False,\n advanced=False,\n ),\n MessageTextInput(\n name=\"message_body\",\n display_name=\"Message Body\",\n info=\"The body content of the message to be sent\",\n show=False,\n advanced=True,\n ),\n # Label management fields\n MessageTextInput(\n name=\"label_name\",\n display_name=\"Label Name\",\n info=\"Name of the Gmail label to create, modify, or filter by\",\n show=False,\n required=True,\n advanced=False,\n ),\n MessageTextInput(\n name=\"label_id\",\n display_name=\"Label ID\",\n info=\"The ID of the Gmail label\",\n show=False,\n advanced=False,\n ),\n MessageTextInput(\n name=\"label_ids\",\n display_name=\"Label Ids\",\n info=\"Comma-separated list of label IDs to filter messages\",\n show=False,\n advanced=True,\n ),\n MessageTextInput(\n name=\"label_list_visibility\",\n display_name=\"Label List Visibility\",\n info=\"The visibility of the label in the label list in the Gmail web interface\",\n show=False,\n advanced=True,\n ),\n MessageTextInput(\n name=\"message_list_visibility\",\n display_name=\"Message List Visibility\",\n info=\"The visibility of the label in the message list in the Gmail web interface\",\n show=False,\n advanced=True,\n ),\n # Pagination and filtering\n MessageTextInput(\n name=\"page_token\",\n display_name=\"Page Token\",\n info=\"Token for retrieving the next page of results\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"include_spam_trash\",\n display_name=\"Include messages from Spam/Trash\",\n info=\"Include messages from SPAM and TRASH in the results\",\n show=False,\n value=False,\n advanced=True,\n ),\n MessageTextInput(\n name=\"format\",\n display_name=\"Format\",\n 
info=\"The format to return the message in. Possible values: minimal, full, raw, metadata\",\n show=False,\n advanced=True,\n ),\n # Contact management fields\n MessageTextInput(\n name=\"resource_name\",\n display_name=\"Resource Name\",\n info=\"The resource name of the person to provide information about\",\n show=False,\n advanced=True,\n ),\n MessageTextInput(\n name=\"person_fields\",\n display_name=\"Person fields\",\n info=\"Fields to return for the person. Multiple fields can be specified by separating them with commas\",\n show=False,\n advanced=True,\n ),\n # Attachment handling\n MessageTextInput(\n name=\"attachment_id\",\n display_name=\"Attachment ID\",\n info=\"Id of the attachment\",\n show=False,\n required=True,\n advanced=False,\n ),\n MessageTextInput(\n name=\"file_name\",\n display_name=\"File name\",\n info=\"File name of the attachment file\",\n show=False,\n required=True,\n advanced=False,\n ),\n FileInput(\n name=\"attachment\",\n display_name=\"Add Attachment\",\n file_types=[\n \"csv\",\n \"txt\",\n \"doc\",\n \"docx\",\n \"xls\",\n \"xlsx\",\n \"pdf\",\n \"png\",\n \"jpg\",\n \"jpeg\",\n \"gif\",\n \"zip\",\n \"rar\",\n \"ppt\",\n \"pptx\",\n ],\n info=\"Add an attachment\",\n show=False,\n ),\n ]\n\n def execute_action(self):\n \"\"\"Execute action and return response as Message.\"\"\"\n toolset = self._build_wrapper()\n\n try:\n self._build_action_maps()\n # Get the display name from the action list\n display_name = self.action[0][\"name\"] if isinstance(self.action, list) and self.action else self.action\n # Use the display_to_key_map to get the action key\n action_key = self._display_to_key_map.get(display_name)\n if not action_key:\n msg = f\"Invalid action: {display_name}\"\n raise ValueError(msg)\n\n enum_name = getattr(Action, action_key)\n params = {}\n if action_key in self._actions_data:\n for field in self._actions_data[action_key][\"action_fields\"]:\n value = getattr(self, field)\n\n if value is None or value == \"\":\n continue\n\n if field in [\"cc\", \"bcc\", \"label_ids\"] and value:\n value = [item.strip() for item in value.split(\",\")]\n\n if field in self._bool_variables:\n value = bool(value)\n\n params[field] = value\n\n if params.get(\"gmail_user_id\"):\n params[\"user_id\"] = params.pop(\"gmail_user_id\")\n\n result = toolset.execute_action(\n action=enum_name,\n params=params,\n )\n if not result.get(\"successful\"):\n message_str = result.get(\"data\", {}).get(\"message\", \"{}\")\n try:\n error_data = json.loads(message_str).get(\"error\", {})\n except json.JSONDecodeError:\n error_data = {\"error\": \"Failed to get exact error details\"}\n return {\n \"code\": error_data.get(\"code\"),\n \"message\": error_data.get(\"message\"),\n \"errors\": error_data.get(\"errors\", []),\n \"status\": error_data.get(\"status\"),\n }\n\n result_data = result.get(\"data\", {})\n actions_data = self._actions_data.get(action_key, {})\n # If 'get_result_field' is True and 'result_field' is specified, extract the data\n # using 'result_field'. 
Otherwise, fall back to the entire 'data' field in the response.\n if actions_data.get(\"get_result_field\") and actions_data.get(\"result_field\"):\n result_data = result_data.get(actions_data.get(\"result_field\"), result.get(\"data\", []))\n if len(result_data) != 1 and not actions_data.get(\"result_field\") and actions_data.get(\"get_result_field\"):\n msg = f\"Expected a dict with a single key, got {len(result_data)} keys: {result_data.keys()}\"\n raise ValueError(msg)\n return result_data # noqa: TRY300\n except Exception as e:\n logger.error(f\"Error executing action: {e}\")\n display_name = self.action[0][\"name\"] if isinstance(self.action, list) and self.action else str(self.action)\n msg = f\"Failed to execute {display_name}: {e!s}\"\n raise ValueError(msg) from e\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n return super().update_build_config(build_config, field_value, field_name)\n\n def set_default_tools(self):\n self._default_tools = {\n \"GMAIL_SEND_EMAIL\",\n \"GMAIL_FETCH_EMAILS\",\n }\n"
+ "value": "import json\nfrom typing import Any\n\nfrom composio import Action\n\nfrom langflow.base.composio.composio_base import ComposioBaseComponent\nfrom langflow.inputs.inputs import (\n BoolInput,\n FileInput,\n IntInput,\n MessageTextInput,\n)\nfrom langflow.logging import logger\n\n\nclass ComposioGmailAPIComponent(ComposioBaseComponent):\n \"\"\"Gmail API component for interacting with Gmail services.\"\"\"\n\n display_name: str = \"Gmail\"\n name = \"GmailAPI\"\n icon = \"Google\"\n documentation: str = \"https://docs.composio.dev\"\n app_name = \"gmail\"\n\n # Gmail-specific actions\n _actions_data: dict = {\n \"GMAIL_SEND_EMAIL\": {\n \"display_name\": \"Send Email\",\n \"action_fields\": [\n \"recipient_email\",\n \"subject\",\n \"body\",\n \"cc\",\n \"bcc\",\n \"is_html\",\n \"gmail_user_id\",\n \"attachment\",\n ],\n },\n \"GMAIL_FETCH_EMAILS\": {\n \"display_name\": \"Fetch Emails\",\n \"action_fields\": [\n \"gmail_user_id\",\n \"max_results\",\n \"query\",\n \"page_token\",\n \"label_ids\",\n \"include_spam_trash\",\n ],\n \"get_result_field\": True,\n \"result_field\": \"messages\",\n },\n \"GMAIL_GET_PROFILE\": {\n \"display_name\": \"Get User Profile\",\n \"action_fields\": [\"gmail_user_id\"],\n },\n \"GMAIL_FETCH_MESSAGE_BY_MESSAGE_ID\": {\n \"display_name\": \"Get Email By ID\",\n \"action_fields\": [\"message_id\", \"gmail_user_id\", \"format\"],\n \"get_result_field\": False,\n },\n \"GMAIL_CREATE_EMAIL_DRAFT\": {\n \"display_name\": \"Create Draft Email\",\n \"action_fields\": [\n \"recipient_email\",\n \"subject\",\n \"body\",\n \"cc\",\n \"bcc\",\n \"is_html\",\n \"attachment\",\n \"gmail_user_id\",\n ],\n },\n \"GMAIL_FETCH_MESSAGE_BY_THREAD_ID\": {\n \"display_name\": \"Get Message By Thread ID\",\n \"action_fields\": [\"thread_id\", \"page_token\", \"gmail_user_id\"],\n \"get_result_field\": False,\n },\n \"GMAIL_LIST_THREADS\": {\n \"display_name\": \"List Email Threads\",\n \"action_fields\": [\"max_results\", \"query\", \"gmail_user_id\", \"page_token\"],\n \"get_result_field\": True,\n \"result_field\": \"threads\",\n },\n \"GMAIL_REPLY_TO_THREAD\": {\n \"display_name\": \"Reply To Thread\",\n \"action_fields\": [\"thread_id\", \"message_body\", \"recipient_email\", \"gmail_user_id\", \"cc\", \"bcc\", \"is_html\"],\n },\n \"GMAIL_LIST_LABELS\": {\n \"display_name\": \"List Email Labels\",\n \"action_fields\": [\"gmail_user_id\"],\n \"get_result_field\": True,\n \"result_field\": \"labels\",\n },\n \"GMAIL_CREATE_LABEL\": {\n \"display_name\": \"Create Email Label\",\n \"action_fields\": [\"label_name\", \"label_list_visibility\", \"message_list_visibility\", \"gmail_user_id\"],\n },\n \"GMAIL_GET_PEOPLE\": {\n \"display_name\": \"Get Contacts\",\n \"action_fields\": [\"resource_name\", \"person_fields\"],\n \"get_result_field\": True,\n \"result_field\": \"people_data\",\n },\n \"GMAIL_REMOVE_LABEL\": {\n \"display_name\": \"Delete Email Label\",\n \"action_fields\": [\"label_id\", \"gmail_user_id\"],\n \"get_result_field\": False,\n },\n \"GMAIL_GET_ATTACHMENT\": {\n \"display_name\": \"Get Attachment\",\n \"action_fields\": [\"message_id\", \"attachment_id\", \"file_name\", \"gmail_user_id\"],\n },\n }\n _all_fields = {field for action_data in _actions_data.values() for field in action_data[\"action_fields\"]}\n _bool_variables = {\"is_html\", \"include_spam_trash\"}\n\n # Combine base inputs with Gmail-specific inputs\n inputs = [\n *ComposioBaseComponent._base_inputs,\n # Email composition fields\n MessageTextInput(\n name=\"recipient_email\",\n 
display_name=\"Recipient Email\",\n info=\"Email address of the recipient\",\n show=False,\n required=True,\n advanced=False,\n ),\n MessageTextInput(\n name=\"subject\",\n display_name=\"Subject\",\n info=\"Subject of the email\",\n show=False,\n required=True,\n advanced=False,\n ),\n MessageTextInput(\n name=\"body\",\n display_name=\"Body\",\n required=True,\n info=\"Content of the email\",\n show=False,\n advanced=False,\n ),\n MessageTextInput(\n name=\"cc\",\n display_name=\"CC\",\n info=\"Email addresses to CC (Carbon Copy) in the email, separated by commas\",\n show=False,\n advanced=True,\n ),\n MessageTextInput(\n name=\"bcc\",\n display_name=\"BCC\",\n info=\"Email addresses to BCC (Blind Carbon Copy) in the email, separated by commas\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"is_html\",\n display_name=\"Is HTML\",\n info=\"Specify whether the email body contains HTML content (true/false)\",\n show=False,\n value=False,\n advanced=True,\n ),\n # Email retrieval and management fields\n MessageTextInput(\n name=\"gmail_user_id\",\n display_name=\"User ID\",\n info=\"The user's email address or 'me' for the authenticated user\",\n show=False,\n advanced=True,\n ),\n IntInput(\n name=\"max_results\",\n display_name=\"Max Results\",\n required=True,\n info=\"Maximum number of emails to be returned\",\n show=False,\n advanced=False,\n ),\n MessageTextInput(\n name=\"message_id\",\n display_name=\"Message ID\",\n info=\"The ID of the specific email message\",\n show=False,\n required=True,\n advanced=False,\n ),\n MessageTextInput(\n name=\"thread_id\",\n display_name=\"Thread ID\",\n info=\"The ID of the email thread\",\n show=False,\n required=True,\n advanced=False,\n ),\n MessageTextInput(\n name=\"query\",\n display_name=\"Query\",\n info=\"Search query to filter emails (e.g., 'from:someone@email.com' or 'subject:hello')\",\n show=False,\n advanced=False,\n ),\n MessageTextInput(\n name=\"message_body\",\n display_name=\"Message Body\",\n info=\"The body content of the message to be sent\",\n show=False,\n advanced=True,\n ),\n # Label management fields\n MessageTextInput(\n name=\"label_name\",\n display_name=\"Label Name\",\n info=\"Name of the Gmail label to create, modify, or filter by\",\n show=False,\n required=True,\n advanced=False,\n ),\n MessageTextInput(\n name=\"label_id\",\n display_name=\"Label ID\",\n info=\"The ID of the Gmail label\",\n show=False,\n advanced=False,\n ),\n MessageTextInput(\n name=\"label_ids\",\n display_name=\"Label Ids\",\n info=\"Comma-separated list of label IDs to filter messages\",\n show=False,\n advanced=True,\n ),\n MessageTextInput(\n name=\"label_list_visibility\",\n display_name=\"Label List Visibility\",\n info=\"The visibility of the label in the label list in the Gmail web interface\",\n show=False,\n advanced=True,\n ),\n MessageTextInput(\n name=\"message_list_visibility\",\n display_name=\"Message List Visibility\",\n info=\"The visibility of the label in the message list in the Gmail web interface\",\n show=False,\n advanced=True,\n ),\n # Pagination and filtering\n MessageTextInput(\n name=\"page_token\",\n display_name=\"Page Token\",\n info=\"Token for retrieving the next page of results\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"include_spam_trash\",\n display_name=\"Include messages from Spam/Trash\",\n info=\"Include messages from SPAM and TRASH in the results\",\n show=False,\n value=False,\n advanced=True,\n ),\n MessageTextInput(\n name=\"format\",\n display_name=\"Format\",\n 
info=\"The format to return the message in. Possible values: minimal, full, raw, metadata\",\n show=False,\n advanced=True,\n ),\n # Contact management fields\n MessageTextInput(\n name=\"resource_name\",\n display_name=\"Resource Name\",\n info=\"The resource name of the person to provide information about\",\n show=False,\n advanced=True,\n ),\n MessageTextInput(\n name=\"person_fields\",\n display_name=\"Person fields\",\n info=\"Fields to return for the person. Multiple fields can be specified by separating them with commas\",\n show=False,\n advanced=True,\n ),\n # Attachment handling\n MessageTextInput(\n name=\"attachment_id\",\n display_name=\"Attachment ID\",\n info=\"Id of the attachment\",\n show=False,\n required=True,\n advanced=False,\n ),\n MessageTextInput(\n name=\"file_name\",\n display_name=\"File name\",\n info=\"File name of the attachment file\",\n show=False,\n required=True,\n advanced=False,\n ),\n FileInput(\n name=\"attachment\",\n display_name=\"Add Attachment\",\n file_types=[\n \"csv\",\n \"txt\",\n \"doc\",\n \"docx\",\n \"xls\",\n \"xlsx\",\n \"pdf\",\n \"png\",\n \"jpg\",\n \"jpeg\",\n \"gif\",\n \"zip\",\n \"rar\",\n \"ppt\",\n \"pptx\",\n ],\n info=\"Add an attachment\",\n show=False,\n ),\n ]\n\n def execute_action(self):\n \"\"\"Execute action and return response as Message.\"\"\"\n toolset = self._build_wrapper()\n\n try:\n self._build_action_maps()\n # Get the display name from the action list\n display_name = self.action[0][\"name\"] if isinstance(self.action, list) and self.action else self.action\n # Use the display_to_key_map to get the action key\n action_key = self._display_to_key_map.get(display_name)\n if not action_key:\n msg = f\"Invalid action: {display_name}\"\n raise ValueError(msg)\n\n enum_name = getattr(Action, action_key)\n params = {}\n if action_key in self._actions_data:\n for field in self._actions_data[action_key][\"action_fields\"]:\n value = getattr(self, field)\n\n if value is None or value == \"\":\n continue\n\n if field in [\"cc\", \"bcc\", \"label_ids\"] and value:\n value = [item.strip() for item in value.split(\",\")]\n\n if field in self._bool_variables:\n value = bool(value)\n\n params[field] = value\n\n if params.get(\"gmail_user_id\"):\n params[\"user_id\"] = params.pop(\"gmail_user_id\")\n\n result = toolset.execute_action(\n action=enum_name,\n params=params,\n )\n if not result.get(\"successful\"):\n message_str = result.get(\"data\", {}).get(\"message\", \"{}\")\n try:\n error_data = json.loads(message_str).get(\"error\", {})\n except json.JSONDecodeError:\n error_data = {\"error\": \"Failed to get exact error details\"}\n return {\n \"code\": error_data.get(\"code\"),\n \"message\": error_data.get(\"message\"),\n \"errors\": error_data.get(\"errors\", []),\n \"status\": error_data.get(\"status\"),\n }\n\n result_data = result.get(\"data\", {})\n actions_data = self._actions_data.get(action_key, {})\n # If 'get_result_field' is True and 'result_field' is specified, extract the data\n # using 'result_field'. 
Otherwise, fall back to the entire 'data' field in the response.\n if actions_data.get(\"get_result_field\") and actions_data.get(\"result_field\"):\n result_data = result_data.get(actions_data.get(\"result_field\"), result.get(\"data\", []))\n if len(result_data) != 1 and not actions_data.get(\"result_field\") and actions_data.get(\"get_result_field\"):\n msg = f\"Expected a dict with a single key, got {len(result_data)} keys: {result_data.keys()}\"\n raise ValueError(msg)\n return result_data # noqa: TRY300\n except Exception as e:\n logger.error(f\"Error executing action: {e}\")\n display_name = self.action[0][\"name\"] if isinstance(self.action, list) and self.action else str(self.action)\n msg = f\"Failed to execute {display_name}: {e!s}\"\n raise ValueError(msg) from e\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n return super().update_build_config(build_config, field_value, field_name)\n\n def set_default_tools(self):\n self._default_tools = {\n \"GMAIL_SEND_EMAIL\",\n \"GMAIL_FETCH_EMAILS\",\n }\n"
},
"entity_id": {
"_input_type": "MessageTextInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Hybrid Search RAG.json b/src/backend/base/langflow/initial_setup/starter_projects/Hybrid Search RAG.json
index 104648f7d..933e67c28 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Hybrid Search RAG.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Hybrid Search RAG.json
@@ -344,7 +344,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
+ "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"_input_type": "FileInput",
@@ -624,7 +624,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from pydantic import BaseModel, Field, create_model\nfrom trustcall import create_extractor\n\nfrom langflow.base.models.chat_result import get_chat_result\nfrom langflow.custom import Component\nfrom langflow.helpers.base_model import build_model_from_schema\nfrom langflow.io import (\n BoolInput,\n HandleInput,\n MessageTextInput,\n MultilineInput,\n Output,\n TableInput,\n)\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.table import EditMode\n\n\nclass StructuredOutputComponent(Component):\n display_name = \"Structured Output\"\n description = (\n \"Transforms LLM responses into **structured data formats**. Ideal for extracting specific information \"\n \"or creating consistent outputs.\"\n )\n name = \"StructuredOutput\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"llm\",\n display_name=\"Language Model\",\n info=\"The language model to use to generate the structured output.\",\n input_types=[\"LanguageModel\"],\n required=True,\n ),\n MessageTextInput(\n name=\"input_value\",\n display_name=\"Input Message\",\n info=\"The input message to the language model.\",\n tool_mode=True,\n required=True,\n ),\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Format Instructions\",\n info=\"The instructions to the language model for formatting the output.\",\n value=(\n \"You are an AI system designed to extract structured information from unstructured text.\"\n \"Given the input_text, return a JSON object with predefined keys based on the expected structure.\"\n \"Extract values accurately and format them according to the specified type \"\n \"(e.g., string, integer, float, date).\"\n \"If a value is missing or cannot be determined, return a default \"\n \"(e.g., null, 0, or 'N/A').\"\n \"If multiple instances of the expected structure exist within the input_text, \"\n \"stream each as a separate JSON object.\"\n ),\n required=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"schema_name\",\n display_name=\"Schema Name\",\n info=\"Provide a name for the output data schema.\",\n advanced=True,\n ),\n TableInput(\n name=\"output_schema\",\n display_name=\"Output Schema\",\n info=\"Define the structure and data types for the model's output.\",\n required=True,\n # TODO: remove deault value\n table_schema=[\n {\n \"name\": \"name\",\n \"display_name\": \"Name\",\n \"type\": \"str\",\n \"description\": \"Specify the name of the output field.\",\n \"default\": \"field\",\n \"edit_mode\": EditMode.INLINE,\n },\n {\n \"name\": \"description\",\n \"display_name\": \"Description\",\n \"type\": \"str\",\n \"description\": \"Describe the purpose of the output field.\",\n \"default\": \"description of field\",\n \"edit_mode\": EditMode.POPOVER,\n },\n {\n \"name\": \"type\",\n \"display_name\": \"Type\",\n \"type\": \"str\",\n \"edit_mode\": EditMode.INLINE,\n \"description\": (\"Indicate the data type of the output field (e.g., str, int, float, bool, dict).\"),\n \"options\": [\"str\", \"int\", \"float\", \"bool\", \"dict\"],\n \"default\": \"str\",\n },\n ],\n value=[\n {\n \"name\": \"field\",\n \"description\": \"description of field\",\n \"type\": \"str\",\n \"multiple\": \"False\",\n }\n ],\n ),\n BoolInput(\n name=\"multiple\",\n advanced=True,\n display_name=\"Generate Multiple\",\n info=\"[Deprecated] Always set to True\",\n value=True,\n ),\n ]\n\n outputs = [\n Output(\n name=\"structured_output\",\n display_name=\"Structured Output\",\n method=\"build_structured_output\",\n ),\n Output(\n 
name=\"structured_output_dataframe\",\n display_name=\"DataFrame\",\n method=\"as_dataframe\",\n ),\n ]\n\n def build_structured_output_base(self) -> Data:\n schema_name = self.schema_name or \"OutputModel\"\n\n if not hasattr(self.llm, \"with_structured_output\"):\n msg = \"Language model does not support structured output.\"\n raise TypeError(msg)\n if not self.output_schema:\n msg = \"Output schema cannot be empty\"\n raise ValueError(msg)\n\n output_model_ = build_model_from_schema(self.output_schema)\n\n output_model = create_model(\n schema_name,\n __doc__=f\"A list of {schema_name}.\",\n objects=(list[output_model_], Field(description=f\"A list of {schema_name}.\")), # type: ignore[valid-type]\n )\n\n try:\n llm_with_structured_output = create_extractor(self.llm, tools=[output_model])\n except NotImplementedError as exc:\n msg = f\"{self.llm.__class__.__name__} does not support structured output.\"\n raise TypeError(msg) from exc\n config_dict = {\n \"run_name\": self.display_name,\n \"project_name\": self.get_project_name(),\n \"callbacks\": self.get_langchain_callbacks(),\n }\n result = get_chat_result(\n runnable=llm_with_structured_output,\n system_message=self.system_prompt,\n input_value=self.input_value,\n config=config_dict,\n )\n if isinstance(result, BaseModel):\n result = result.model_dump()\n if responses := result.get(\"responses\"):\n result = responses[0].model_dump()\n if result and \"objects\" in result:\n return result[\"objects\"]\n\n return result\n\n def build_structured_output(self) -> Data:\n output = self.build_structured_output_base()\n\n return Data(text_key=\"results\", data={\"results\": output})\n\n def as_dataframe(self) -> DataFrame:\n output = self.build_structured_output_base()\n if isinstance(output, list):\n return DataFrame(data=output)\n return DataFrame(data=[output])\n"
+ "value": "from pydantic import BaseModel, Field, create_model\nfrom trustcall import create_extractor\n\nfrom langflow.base.models.chat_result import get_chat_result\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.helpers.base_model import build_model_from_schema\nfrom langflow.io import (\n BoolInput,\n HandleInput,\n MessageTextInput,\n MultilineInput,\n Output,\n TableInput,\n)\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.table import EditMode\n\n\nclass StructuredOutputComponent(Component):\n display_name = \"Structured Output\"\n description = (\n \"Transforms LLM responses into **structured data formats**. Ideal for extracting specific information \"\n \"or creating consistent outputs.\"\n )\n name = \"StructuredOutput\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"llm\",\n display_name=\"Language Model\",\n info=\"The language model to use to generate the structured output.\",\n input_types=[\"LanguageModel\"],\n required=True,\n ),\n MessageTextInput(\n name=\"input_value\",\n display_name=\"Input Message\",\n info=\"The input message to the language model.\",\n tool_mode=True,\n required=True,\n ),\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Format Instructions\",\n info=\"The instructions to the language model for formatting the output.\",\n value=(\n \"You are an AI system designed to extract structured information from unstructured text.\"\n \"Given the input_text, return a JSON object with predefined keys based on the expected structure.\"\n \"Extract values accurately and format them according to the specified type \"\n \"(e.g., string, integer, float, date).\"\n \"If a value is missing or cannot be determined, return a default \"\n \"(e.g., null, 0, or 'N/A').\"\n \"If multiple instances of the expected structure exist within the input_text, \"\n \"stream each as a separate JSON object.\"\n ),\n required=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"schema_name\",\n display_name=\"Schema Name\",\n info=\"Provide a name for the output data schema.\",\n advanced=True,\n ),\n TableInput(\n name=\"output_schema\",\n display_name=\"Output Schema\",\n info=\"Define the structure and data types for the model's output.\",\n required=True,\n # TODO: remove deault value\n table_schema=[\n {\n \"name\": \"name\",\n \"display_name\": \"Name\",\n \"type\": \"str\",\n \"description\": \"Specify the name of the output field.\",\n \"default\": \"field\",\n \"edit_mode\": EditMode.INLINE,\n },\n {\n \"name\": \"description\",\n \"display_name\": \"Description\",\n \"type\": \"str\",\n \"description\": \"Describe the purpose of the output field.\",\n \"default\": \"description of field\",\n \"edit_mode\": EditMode.POPOVER,\n },\n {\n \"name\": \"type\",\n \"display_name\": \"Type\",\n \"type\": \"str\",\n \"edit_mode\": EditMode.INLINE,\n \"description\": (\"Indicate the data type of the output field (e.g., str, int, float, bool, dict).\"),\n \"options\": [\"str\", \"int\", \"float\", \"bool\", \"dict\"],\n \"default\": \"str\",\n },\n ],\n value=[\n {\n \"name\": \"field\",\n \"description\": \"description of field\",\n \"type\": \"str\",\n \"multiple\": \"False\",\n }\n ],\n ),\n BoolInput(\n name=\"multiple\",\n advanced=True,\n display_name=\"Generate Multiple\",\n info=\"[Deprecated] Always set to True\",\n value=True,\n ),\n ]\n\n outputs = [\n Output(\n name=\"structured_output\",\n display_name=\"Structured Output\",\n 
method=\"build_structured_output\",\n ),\n Output(\n name=\"structured_output_dataframe\",\n display_name=\"DataFrame\",\n method=\"as_dataframe\",\n ),\n ]\n\n def build_structured_output_base(self) -> Data:\n schema_name = self.schema_name or \"OutputModel\"\n\n if not hasattr(self.llm, \"with_structured_output\"):\n msg = \"Language model does not support structured output.\"\n raise TypeError(msg)\n if not self.output_schema:\n msg = \"Output schema cannot be empty\"\n raise ValueError(msg)\n\n output_model_ = build_model_from_schema(self.output_schema)\n\n output_model = create_model(\n schema_name,\n __doc__=f\"A list of {schema_name}.\",\n objects=(list[output_model_], Field(description=f\"A list of {schema_name}.\")), # type: ignore[valid-type]\n )\n\n try:\n llm_with_structured_output = create_extractor(self.llm, tools=[output_model])\n except NotImplementedError as exc:\n msg = f\"{self.llm.__class__.__name__} does not support structured output.\"\n raise TypeError(msg) from exc\n config_dict = {\n \"run_name\": self.display_name,\n \"project_name\": self.get_project_name(),\n \"callbacks\": self.get_langchain_callbacks(),\n }\n result = get_chat_result(\n runnable=llm_with_structured_output,\n system_message=self.system_prompt,\n input_value=self.input_value,\n config=config_dict,\n )\n if isinstance(result, BaseModel):\n result = result.model_dump()\n if responses := result.get(\"responses\"):\n result = responses[0].model_dump()\n if result and \"objects\" in result:\n return result[\"objects\"]\n\n return result\n\n def build_structured_output(self) -> Data:\n output = self.build_structured_output_base()\n\n return Data(text_key=\"results\", data={\"results\": output})\n\n def as_dataframe(self) -> DataFrame:\n output = self.build_structured_output_base()\n if isinstance(output, list):\n return DataFrame(data=output)\n return DataFrame(data=[output])\n"
},
"input_value": {
"_input_type": "MessageTextInput",
@@ -969,7 +969,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
+ "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
},
"input_value": {
"_input_type": "MessageInput",
@@ -1441,7 +1441,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "import re\nfrom collections import defaultdict\nfrom dataclasses import asdict, dataclass, field\n\nfrom astrapy import DataAPIClient, Database\nfrom astrapy.data.info.reranking import RerankServiceOptions\nfrom astrapy.info import CollectionDescriptor, CollectionLexicalOptions, CollectionRerankOptions\nfrom langchain_astradb import AstraDBVectorStore, VectorServiceOptions\nfrom langchain_astradb.utils.astradb import HybridSearchMode, _AstraDBCollectionEnvironment\n\nfrom langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom langflow.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom langflow.helpers import docs_to_data\nfrom langflow.inputs import FloatInput, NestedDictInput\nfrom langflow.io import (\n BoolInput,\n DropdownInput,\n HandleInput,\n IntInput,\n QueryInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema import Data\nfrom langflow.utils.version import get_version_info\n\n\n@vector_store_connection\nclass AstraDBVectorStoreComponent(LCVectorStoreComponent):\n display_name: str = \"Astra DB\"\n description: str = \"Ingest and search documents in Astra DB\"\n documentation: str = \"https://docs.datastax.com/en/langflow/astra-components.html\"\n name = \"AstraDB\"\n icon: str = \"AstraDB\"\n\n _cached_vector_store: AstraDBVectorStore | None = None\n\n @dataclass\n class NewDatabaseInput:\n functionality: str = \"create\"\n fields: dict[str, dict] = field(\n default_factory=lambda: {\n \"data\": {\n \"node\": {\n \"name\": \"create_database\",\n \"description\": \"Please allow several minutes for creation to complete.\",\n \"display_name\": \"Create new database\",\n \"field_order\": [\"01_new_database_name\", \"02_cloud_provider\", \"03_region\"],\n \"template\": {\n \"01_new_database_name\": StrInput(\n name=\"new_database_name\",\n display_name=\"Name\",\n info=\"Name of the new database to create in Astra DB.\",\n required=True,\n ),\n \"02_cloud_provider\": DropdownInput(\n name=\"cloud_provider\",\n display_name=\"Cloud provider\",\n info=\"Cloud provider for the new database.\",\n options=[],\n required=True,\n real_time_refresh=True,\n ),\n \"03_region\": DropdownInput(\n name=\"region\",\n display_name=\"Region\",\n info=\"Region for the new database.\",\n options=[],\n required=True,\n ),\n },\n },\n }\n }\n )\n\n @dataclass\n class NewCollectionInput:\n functionality: str = \"create\"\n fields: dict[str, dict] = field(\n default_factory=lambda: {\n \"data\": {\n \"node\": {\n \"name\": \"create_collection\",\n \"description\": \"Please allow several seconds for creation to complete.\",\n \"display_name\": \"Create new collection\",\n \"field_order\": [\n \"01_new_collection_name\",\n \"02_embedding_generation_provider\",\n \"03_embedding_generation_model\",\n \"04_dimension\",\n ],\n \"template\": {\n \"01_new_collection_name\": StrInput(\n name=\"new_collection_name\",\n display_name=\"Name\",\n info=\"Name of the new collection to create in Astra DB.\",\n required=True,\n ),\n \"02_embedding_generation_provider\": DropdownInput(\n name=\"embedding_generation_provider\",\n display_name=\"Embedding generation method\",\n info=\"Provider to use for generating embeddings.\",\n helper_text=(\n \"To create collections with more embedding provider options, go to \"\n 'your database in Astra DB'\n ),\n real_time_refresh=True,\n required=True,\n options=[],\n ),\n \"03_embedding_generation_model\": DropdownInput(\n name=\"embedding_generation_model\",\n display_name=\"Embedding 
model\",\n info=\"Model to use for generating embeddings.\",\n real_time_refresh=True,\n options=[],\n ),\n \"04_dimension\": IntInput(\n name=\"dimension\",\n display_name=\"Dimensions\",\n info=\"Dimensions of the embeddings to generate.\",\n value=None,\n ),\n },\n },\n }\n }\n )\n\n inputs = [\n SecretStrInput(\n name=\"token\",\n display_name=\"Astra DB Application Token\",\n info=\"Authentication token for accessing Astra DB.\",\n value=\"ASTRA_DB_APPLICATION_TOKEN\",\n required=True,\n real_time_refresh=True,\n input_types=[],\n ),\n DropdownInput(\n name=\"environment\",\n display_name=\"Environment\",\n info=\"The environment for the Astra DB API Endpoint.\",\n options=[\"prod\", \"test\", \"dev\"],\n value=\"prod\",\n advanced=True,\n real_time_refresh=True,\n combobox=True,\n ),\n DropdownInput(\n name=\"database_name\",\n display_name=\"Database\",\n info=\"The Database name for the Astra DB instance.\",\n required=True,\n refresh_button=True,\n real_time_refresh=True,\n dialog_inputs=asdict(NewDatabaseInput()),\n combobox=True,\n ),\n StrInput(\n name=\"api_endpoint\",\n display_name=\"Astra DB API Endpoint\",\n info=\"The API Endpoint for the Astra DB instance. Supercedes database selection.\",\n show=False,\n ),\n DropdownInput(\n name=\"keyspace\",\n display_name=\"Keyspace\",\n info=\"Optional keyspace within Astra DB to use for the collection.\",\n advanced=True,\n options=[],\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"collection_name\",\n display_name=\"Collection\",\n info=\"The name of the collection within Astra DB where the vectors will be stored.\",\n required=True,\n refresh_button=True,\n real_time_refresh=True,\n dialog_inputs=asdict(NewCollectionInput()),\n combobox=True,\n show=False,\n ),\n HandleInput(\n name=\"embedding_model\",\n display_name=\"Embedding Model\",\n input_types=[\"Embeddings\"],\n info=\"Specify the Embedding Model. Not required for Astra Vectorize collections.\",\n required=False,\n show=False,\n ),\n *LCVectorStoreComponent.inputs,\n DropdownInput(\n name=\"search_method\",\n display_name=\"Search Method\",\n info=(\n \"Determine how your content is matched: Vector finds semantic similarity, \"\n \"and Hybrid Search (suggested) combines both approaches \"\n \"with a reranker.\"\n ),\n options=[\"Hybrid Search\", \"Vector Search\"], # TODO: Restore Lexical Search?\n options_metadata=[{\"icon\": \"SearchHybrid\"}, {\"icon\": \"SearchVector\"}],\n value=\"Vector Search\",\n advanced=True,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"reranker\",\n display_name=\"Reranker\",\n info=\"Post-retrieval model that re-scores results for optimal relevance ranking.\",\n show=False,\n toggle=True,\n ),\n QueryInput(\n name=\"lexical_terms\",\n display_name=\"Lexical Terms\",\n info=\"Add additional terms/keywords to augment search precision.\",\n placeholder=\"Enter terms to search...\",\n separator=\" \",\n show=False,\n value=\"\",\n advanced=True,\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Number of Search Results\",\n info=\"Number of search results to return.\",\n advanced=True,\n value=4,\n ),\n DropdownInput(\n name=\"search_type\",\n display_name=\"Search Type\",\n info=\"Search type to use\",\n options=[\"Similarity\", \"Similarity with score threshold\", \"MMR (Max Marginal Relevance)\"],\n value=\"Similarity\",\n advanced=True,\n ),\n FloatInput(\n name=\"search_score_threshold\",\n display_name=\"Search Score Threshold\",\n info=\"Minimum similarity score threshold for search results. 
\"\n \"(when using 'Similarity with score threshold')\",\n value=0,\n advanced=True,\n ),\n NestedDictInput(\n name=\"advanced_search_filter\",\n display_name=\"Search Metadata Filter\",\n info=\"Optional dictionary of filters to apply to the search query.\",\n advanced=True,\n ),\n BoolInput(\n name=\"autodetect_collection\",\n display_name=\"Autodetect Collection\",\n info=\"Boolean flag to determine whether to autodetect the collection.\",\n advanced=True,\n value=True,\n ),\n StrInput(\n name=\"content_field\",\n display_name=\"Content Field\",\n info=\"Field to use as the text content field for the vector store.\",\n advanced=True,\n ),\n StrInput(\n name=\"deletion_field\",\n display_name=\"Deletion Based On Field\",\n info=\"When this parameter is provided, documents in the target collection with \"\n \"metadata field values matching the input metadata field value will be deleted \"\n \"before new data is loaded.\",\n advanced=True,\n ),\n BoolInput(\n name=\"ignore_invalid_documents\",\n display_name=\"Ignore Invalid Documents\",\n info=\"Boolean flag to determine whether to ignore invalid documents at runtime.\",\n advanced=True,\n ),\n NestedDictInput(\n name=\"astradb_vectorstore_kwargs\",\n display_name=\"AstraDBVectorStore Parameters\",\n info=\"Optional dictionary of additional parameters for the AstraDBVectorStore.\",\n advanced=True,\n ),\n ]\n\n @classmethod\n def map_cloud_providers(cls):\n # TODO: Programmatically fetch the regions for each cloud provider\n return {\n \"dev\": {\n \"Amazon Web Services\": {\n \"id\": \"aws\",\n \"regions\": [\"us-west-2\"],\n },\n \"Google Cloud Platform\": {\n \"id\": \"gcp\",\n \"regions\": [\"us-central1\", \"europe-west4\"],\n },\n },\n \"test\": {\n \"Google Cloud Platform\": {\n \"id\": \"gcp\",\n \"regions\": [\"us-central1\"],\n },\n },\n \"prod\": {\n \"Amazon Web Services\": {\n \"id\": \"aws\",\n \"regions\": [\"us-east-2\", \"ap-south-1\", \"eu-west-1\"],\n },\n \"Google Cloud Platform\": {\n \"id\": \"gcp\",\n \"regions\": [\"us-east1\"],\n },\n \"Microsoft Azure\": {\n \"id\": \"azure\",\n \"regions\": [\"westus3\"],\n },\n },\n }\n\n @classmethod\n def get_vectorize_providers(cls, token: str, environment: str | None = None, api_endpoint: str | None = None):\n try:\n # Get the admin object\n client = DataAPIClient(environment=environment)\n admin_client = client.get_admin()\n db_admin = admin_client.get_database_admin(api_endpoint, token=token)\n\n # Get the list of embedding providers\n embedding_providers = db_admin.find_embedding_providers()\n\n vectorize_providers_mapping = {}\n # Map the provider display name to the provider key and models\n for provider_key, provider_data in embedding_providers.embedding_providers.items():\n # Get the provider display name and models\n display_name = provider_data.display_name\n models = [model.name for model in provider_data.models]\n\n # Build our mapping\n vectorize_providers_mapping[display_name] = [provider_key, models]\n\n # Sort the resulting dictionary\n return defaultdict(list, dict(sorted(vectorize_providers_mapping.items())))\n except Exception as _: # noqa: BLE001\n return {}\n\n @classmethod\n async def create_database_api(\n cls,\n new_database_name: str,\n cloud_provider: str,\n region: str,\n token: str,\n environment: str | None = None,\n keyspace: str | None = None,\n ):\n client = DataAPIClient(environment=environment)\n\n # Get the admin object\n admin_client = client.get_admin(token=token)\n\n # Get the environment, set to prod if null like\n my_env = environment 
or \"prod\"\n\n # Raise a value error if name isn't provided\n if not new_database_name:\n msg = \"Database name is required to create a new database.\"\n raise ValueError(msg)\n\n # Call the create database function\n return await admin_client.async_create_database(\n name=new_database_name,\n cloud_provider=cls.map_cloud_providers()[my_env][cloud_provider][\"id\"],\n region=region,\n keyspace=keyspace,\n wait_until_active=False,\n )\n\n @classmethod\n async def create_collection_api(\n cls,\n new_collection_name: str,\n token: str,\n api_endpoint: str,\n environment: str | None = None,\n keyspace: str | None = None,\n dimension: int | None = None,\n embedding_generation_provider: str | None = None,\n embedding_generation_model: str | None = None,\n reranker: str | None = None,\n ):\n # Build vectorize options, if needed\n vectorize_options = None\n if not dimension:\n providers = cls.get_vectorize_providers(token=token, environment=environment, api_endpoint=api_endpoint)\n vectorize_options = VectorServiceOptions(\n provider=providers.get(embedding_generation_provider, [None, []])[0],\n model_name=embedding_generation_model,\n )\n\n # Raise a value error if name isn't provided\n if not new_collection_name:\n msg = \"Collection name is required to create a new collection.\"\n raise ValueError(msg)\n\n # Define the base arguments being passed to the create collection function\n base_args = {\n \"collection_name\": new_collection_name,\n \"token\": token,\n \"api_endpoint\": api_endpoint,\n \"keyspace\": keyspace,\n \"environment\": environment,\n \"embedding_dimension\": dimension,\n \"collection_vector_service_options\": vectorize_options,\n }\n\n # Add optional arguments if the reranker is set\n if reranker:\n # Split the reranker field into a provider a model name\n provider, _ = reranker.split(\"/\")\n base_args[\"collection_rerank\"] = CollectionRerankOptions(\n service=RerankServiceOptions(provider=provider, model_name=reranker),\n )\n base_args[\"collection_lexical\"] = CollectionLexicalOptions(analyzer=\"STANDARD\")\n\n _AstraDBCollectionEnvironment(**base_args)\n\n @classmethod\n def get_database_list_static(cls, token: str, environment: str | None = None):\n client = DataAPIClient(environment=environment)\n\n # Get the admin object\n admin_client = client.get_admin(token=token)\n\n # Get the list of databases\n db_list = admin_client.list_databases()\n\n # Generate the api endpoint for each database\n db_info_dict = {}\n for db in db_list:\n try:\n # Get the API endpoint for the database\n api_endpoint = db.regions[0].api_endpoint\n\n # Get the number of collections\n try:\n # Get the number of collections in the database\n num_collections = len(\n client.get_database(\n api_endpoint,\n token=token,\n ).list_collection_names()\n )\n except Exception: # noqa: BLE001\n if db.status != \"PENDING\":\n continue\n num_collections = 0\n\n # Add the database to the dictionary\n db_info_dict[db.name] = {\n \"api_endpoint\": api_endpoint,\n \"keyspaces\": db.keyspaces,\n \"collections\": num_collections,\n \"status\": db.status if db.status != \"ACTIVE\" else None,\n \"org_id\": db.org_id if db.org_id else None,\n }\n except Exception: # noqa: BLE001, S110\n pass\n\n return db_info_dict\n\n def get_database_list(self):\n return self.get_database_list_static(\n token=self.token,\n environment=self.environment,\n )\n\n @classmethod\n def get_api_endpoint_static(\n cls,\n token: str,\n environment: str | None = None,\n api_endpoint: str | None = None,\n database_name: str | None = None,\n 
):\n # If the api_endpoint is set, return it\n if api_endpoint:\n return api_endpoint\n\n # Check if the database_name is like a url\n if database_name and database_name.startswith(\"https://\"):\n return database_name\n\n # If the database is not set, nothing we can do.\n if not database_name:\n return None\n\n # Grab the database object\n db = cls.get_database_list_static(token=token, environment=environment).get(database_name)\n if not db:\n return None\n\n # Otherwise, get the URL from the database list\n return db.get(\"api_endpoint\")\n\n def get_api_endpoint(self):\n return self.get_api_endpoint_static(\n token=self.token,\n environment=self.environment,\n api_endpoint=self.api_endpoint,\n database_name=self.database_name,\n )\n\n @classmethod\n def get_database_id_static(cls, api_endpoint: str) -> str | None:\n # Pattern matches standard UUID format: 8-4-4-4-12 hexadecimal characters\n uuid_pattern = r\"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}\"\n match = re.search(uuid_pattern, api_endpoint)\n\n return match.group(0) if match else None\n\n def get_database_id(self):\n return self.get_database_id_static(api_endpoint=self.get_api_endpoint())\n\n def get_keyspace(self):\n keyspace = self.keyspace\n\n if keyspace:\n return keyspace.strip()\n\n return \"default_keyspace\"\n\n def get_database_object(self, api_endpoint: str | None = None):\n try:\n client = DataAPIClient(environment=self.environment)\n\n return client.get_database(\n api_endpoint or self.get_api_endpoint(),\n token=self.token,\n keyspace=self.get_keyspace(),\n )\n except Exception as e:\n msg = f\"Error fetching database object: {e}\"\n raise ValueError(msg) from e\n\n def collection_data(self, collection_name: str, database: Database | None = None):\n try:\n if not database:\n client = DataAPIClient(environment=self.environment)\n\n database = client.get_database(\n self.get_api_endpoint(),\n token=self.token,\n keyspace=self.get_keyspace(),\n )\n\n collection = database.get_collection(collection_name)\n\n return collection.estimated_document_count()\n except Exception as e: # noqa: BLE001\n self.log(f\"Error checking collection data: {e}\")\n\n return None\n\n def _initialize_database_options(self):\n try:\n return [\n {\n \"name\": name,\n \"status\": info[\"status\"],\n \"collections\": info[\"collections\"],\n \"api_endpoint\": info[\"api_endpoint\"],\n \"keyspaces\": info[\"keyspaces\"],\n \"org_id\": info[\"org_id\"],\n }\n for name, info in self.get_database_list().items()\n ]\n except Exception as e:\n msg = f\"Error fetching database options: {e}\"\n raise ValueError(msg) from e\n\n @classmethod\n def get_provider_icon(cls, collection: CollectionDescriptor | None = None, provider_name: str | None = None) -> str:\n # Get the provider name from the collection\n provider_name = provider_name or (\n collection.definition.vector.service.provider\n if (\n collection\n and collection.definition\n and collection.definition.vector\n and collection.definition.vector.service\n )\n else None\n )\n\n # If there is no provider, use the vector store icon\n if not provider_name or provider_name.lower() == \"bring your own\":\n return \"vectorstores\"\n\n # Map provider casings\n case_map = {\n \"nvidia\": \"NVIDIA\",\n \"openai\": \"OpenAI\",\n \"amazon bedrock\": \"AmazonBedrockEmbeddings\",\n \"azure openai\": \"AzureOpenAiEmbeddings\",\n \"cohere\": \"Cohere\",\n \"jina ai\": \"JinaAI\",\n \"mistral ai\": \"MistralAI\",\n \"upstage\": \"Upstage\",\n \"voyage ai\": \"VoyageAI\",\n }\n\n # 
Adjust the casing on some like nvidia\n return case_map[provider_name.lower()] if provider_name.lower() in case_map else provider_name.title()\n\n def _initialize_collection_options(self, api_endpoint: str | None = None):\n # Nothing to generate if we don't have an API endpoint yet\n api_endpoint = api_endpoint or self.get_api_endpoint()\n if not api_endpoint:\n return []\n\n # Retrieve the database object\n database = self.get_database_object(api_endpoint=api_endpoint)\n\n # Get the list of collections\n collection_list = database.list_collections(keyspace=self.get_keyspace())\n\n # Return the list of collections and metadata associated\n return [\n {\n \"name\": col.name,\n \"records\": self.collection_data(collection_name=col.name, database=database),\n \"provider\": (\n col.definition.vector.service.provider\n if col.definition.vector and col.definition.vector.service\n else None\n ),\n \"icon\": self.get_provider_icon(collection=col),\n \"model\": (\n col.definition.vector.service.model_name\n if col.definition.vector and col.definition.vector.service\n else None\n ),\n }\n for col in collection_list\n ]\n\n def reset_provider_options(self, build_config: dict) -> dict:\n \"\"\"Reset provider options and related configurations in the build_config dictionary.\"\"\"\n # Extract template path for cleaner access\n template = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n\n # Get vectorize providers\n vectorize_providers_api = self.get_vectorize_providers(\n token=self.token,\n environment=self.environment,\n api_endpoint=build_config[\"api_endpoint\"][\"value\"],\n )\n\n # Create a new dictionary with \"Bring your own\" first\n vectorize_providers: dict[str, list[list[str]]] = {\"Bring your own\": [[], []]}\n\n # Add the remaining items (only Nvidia) from the original dictionary\n vectorize_providers.update(\n {\n k: v\n for k, v in vectorize_providers_api.items()\n if k.lower() in [\"nvidia\"] # TODO: Eventually support more\n }\n )\n\n # Set provider options\n provider_field = \"02_embedding_generation_provider\"\n template[provider_field][\"options\"] = list(vectorize_providers.keys())\n\n # Add metadata for each provider option\n template[provider_field][\"options_metadata\"] = [\n {\"icon\": self.get_provider_icon(provider_name=provider)} for provider in template[provider_field][\"options\"]\n ]\n\n # Get selected embedding provider\n embedding_provider = template[provider_field][\"value\"]\n is_bring_your_own = embedding_provider and embedding_provider == \"Bring your own\"\n\n # Configure embedding model field\n model_field = \"03_embedding_generation_model\"\n template[model_field].update(\n {\n \"options\": vectorize_providers.get(embedding_provider, [[], []])[1],\n \"placeholder\": \"Bring your own\" if is_bring_your_own else None,\n \"readonly\": is_bring_your_own,\n \"required\": not is_bring_your_own,\n \"value\": None,\n }\n )\n\n # If this is a bring your own, set dimensions to 0\n return self.reset_dimension_field(build_config)\n\n def reset_dimension_field(self, build_config: dict) -> dict:\n \"\"\"Reset dimension field options based on provided configuration.\"\"\"\n # Extract template path for cleaner access\n template = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n\n # Get selected embedding model\n provider_field = \"02_embedding_generation_provider\"\n embedding_provider = template[provider_field][\"value\"]\n is_bring_your_own = embedding_provider and 
embedding_provider == \"Bring your own\"\n\n # Configure dimension field\n dimension_field = \"04_dimension\"\n dimension_value = 1024 if not is_bring_your_own else None # TODO: Dynamically figure this out\n template[dimension_field].update(\n {\n \"placeholder\": dimension_value,\n \"value\": dimension_value,\n \"readonly\": not is_bring_your_own,\n \"required\": is_bring_your_own,\n }\n )\n\n return build_config\n\n def reset_collection_list(self, build_config: dict) -> dict:\n \"\"\"Reset collection list options based on provided configuration.\"\"\"\n # Get collection options\n collection_options = self._initialize_collection_options(api_endpoint=build_config[\"api_endpoint\"][\"value\"])\n # Update collection configuration\n collection_config = build_config[\"collection_name\"]\n collection_config.update(\n {\n \"options\": [col[\"name\"] for col in collection_options],\n \"options_metadata\": [{k: v for k, v in col.items() if k != \"name\"} for col in collection_options],\n }\n )\n\n # Reset selected collection if not in options\n if collection_config[\"value\"] not in collection_config[\"options\"]:\n collection_config[\"value\"] = \"\"\n\n # Set advanced status based on database selection\n collection_config[\"show\"] = bool(build_config[\"database_name\"][\"value\"])\n\n return build_config\n\n def reset_database_list(self, build_config: dict) -> dict:\n \"\"\"Reset database list options and related configurations.\"\"\"\n # Get database options\n database_options = self._initialize_database_options()\n\n # Update cloud provider options\n env = self.environment\n template = build_config[\"database_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n template[\"02_cloud_provider\"][\"options\"] = list(self.map_cloud_providers()[env].keys())\n\n # Update database configuration\n database_config = build_config[\"database_name\"]\n database_config.update(\n {\n \"options\": [db[\"name\"] for db in database_options],\n \"options_metadata\": [{k: v for k, v in db.items() if k != \"name\"} for db in database_options],\n }\n )\n\n # Reset selections if value not in options\n if database_config[\"value\"] not in database_config[\"options\"]:\n database_config[\"value\"] = \"\"\n build_config[\"api_endpoint\"][\"value\"] = \"\"\n build_config[\"collection_name\"][\"show\"] = False\n\n # Set advanced status based on token presence\n database_config[\"show\"] = bool(build_config[\"token\"][\"value\"])\n\n return build_config\n\n def reset_build_config(self, build_config: dict) -> dict:\n \"\"\"Reset all build configuration options to default empty state.\"\"\"\n # Reset database configuration\n database_config = build_config[\"database_name\"]\n database_config.update({\"options\": [], \"options_metadata\": [], \"value\": \"\", \"show\": False})\n build_config[\"api_endpoint\"][\"value\"] = \"\"\n\n # Reset collection configuration\n collection_config = build_config[\"collection_name\"]\n collection_config.update({\"options\": [], \"options_metadata\": [], \"value\": \"\", \"show\": False})\n\n return build_config\n\n def _handle_hybrid_search_options(self, build_config: dict) -> dict:\n \"\"\"Set hybrid search options in the build configuration.\"\"\"\n # Detect what hybrid options are available\n # Get the admin object\n client = DataAPIClient(environment=self.environment)\n admin_client = client.get_admin()\n db_admin = admin_client.get_database_admin(self.get_api_endpoint(), token=self.token)\n\n # We will try to get the reranking providers to see if its hybrid 
emabled\n try:\n providers = db_admin.find_reranking_providers()\n build_config[\"reranker\"][\"options\"] = [\n model.name for provider_data in providers.reranking_providers.values() for model in provider_data.models\n ]\n build_config[\"reranker\"][\"options_metadata\"] = [\n {\"icon\": self.get_provider_icon(provider_name=model.name.split(\"/\")[0])}\n for provider in providers.reranking_providers.values()\n for model in provider.models\n ]\n build_config[\"reranker\"][\"value\"] = build_config[\"reranker\"][\"options\"][0]\n\n # Set the default search field to hybrid search\n build_config[\"search_method\"][\"show\"] = True\n build_config[\"search_method\"][\"options\"] = [\"Hybrid Search\", \"Vector Search\"]\n build_config[\"search_method\"][\"value\"] = \"Hybrid Search\"\n except Exception as _: # noqa: BLE001\n build_config[\"reranker\"][\"options\"] = []\n build_config[\"reranker\"][\"options_metadata\"] = []\n\n # Set the default search field to vector search\n build_config[\"search_method\"][\"show\"] = False\n build_config[\"search_method\"][\"options\"] = [\"Vector Search\"]\n build_config[\"search_method\"][\"value\"] = \"Vector Search\"\n\n # Set reranker and lexical terms options based on search method\n build_config[\"reranker\"][\"toggle_value\"] = True\n build_config[\"reranker\"][\"show\"] = build_config[\"search_method\"][\"value\"] == \"Hybrid Search\"\n build_config[\"reranker\"][\"toggle_disable\"] = build_config[\"search_method\"][\"value\"] == \"Hybrid Search\"\n if build_config[\"reranker\"][\"show\"]:\n build_config[\"search_type\"][\"value\"] = \"Similarity\"\n\n return build_config\n\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Update build configuration based on field name and value.\"\"\"\n # Early return if no token provided\n if not self.token:\n return self.reset_build_config(build_config)\n\n # Database creation callback\n if field_name == \"database_name\" and isinstance(field_value, dict):\n if \"01_new_database_name\" in field_value:\n await self._create_new_database(build_config, field_value)\n return self.reset_collection_list(build_config)\n return self._update_cloud_regions(build_config, field_value)\n\n # Collection creation callback\n if field_name == \"collection_name\" and isinstance(field_value, dict):\n # Case 1: New collection creation\n if \"01_new_collection_name\" in field_value:\n await self._create_new_collection(build_config, field_value)\n return build_config\n\n # Case 2: Update embedding provider options\n if \"02_embedding_generation_provider\" in field_value:\n return self.reset_provider_options(build_config)\n\n # Case 3: Update dimension field\n if \"03_embedding_generation_model\" in field_value:\n return self.reset_dimension_field(build_config)\n\n # Initial execution or token/environment change\n first_run = field_name == \"collection_name\" and not field_value and not build_config[\"database_name\"][\"options\"]\n if first_run or field_name in {\"token\", \"environment\"}:\n return self.reset_database_list(build_config)\n\n # Database selection change\n if field_name == \"database_name\" and not isinstance(field_value, dict):\n return self._handle_database_selection(build_config, field_value)\n\n # Keyspace selection change\n if field_name == \"keyspace\":\n return self.reset_collection_list(build_config)\n\n # Collection selection change\n if field_name == \"collection_name\" and not isinstance(field_value, dict):\n return 
self._handle_collection_selection(build_config, field_value)\n\n # Search method selection change\n if field_name == \"search_method\":\n is_vector_search = field_value == \"Vector Search\"\n is_autodetect = build_config[\"autodetect_collection\"][\"value\"]\n\n # Configure lexical terms (same for both cases)\n build_config[\"lexical_terms\"][\"show\"] = not is_vector_search\n build_config[\"lexical_terms\"][\"value\"] = \"\" if is_vector_search else build_config[\"lexical_terms\"][\"value\"]\n\n # Disable reranker disabling if hybrid search is selected\n build_config[\"reranker\"][\"toggle_disable\"] = not is_vector_search\n build_config[\"reranker\"][\"toggle_value\"] = True\n build_config[\"reranker\"][\"value\"] = build_config[\"reranker\"][\"options\"][0]\n\n # Toggle search type and score threshold based on search method\n build_config[\"search_type\"][\"show\"] = is_vector_search\n build_config[\"search_score_threshold\"][\"show\"] = is_vector_search\n\n # Make sure the search_type is set to \"Similarity\"\n if not is_vector_search or is_autodetect:\n build_config[\"search_type\"][\"value\"] = \"Similarity\"\n\n return build_config\n\n async def _create_new_database(self, build_config: dict, field_value: dict) -> None:\n \"\"\"Create a new database and update build config options.\"\"\"\n try:\n await self.create_database_api(\n new_database_name=field_value[\"01_new_database_name\"],\n token=self.token,\n keyspace=self.get_keyspace(),\n environment=self.environment,\n cloud_provider=field_value[\"02_cloud_provider\"],\n region=field_value[\"03_region\"],\n )\n except Exception as e:\n msg = f\"Error creating database: {e}\"\n raise ValueError(msg) from e\n\n build_config[\"database_name\"][\"options\"].append(field_value[\"01_new_database_name\"])\n build_config[\"database_name\"][\"options_metadata\"].append(\n {\n \"status\": \"PENDING\",\n \"collections\": 0,\n \"api_endpoint\": None,\n \"keyspaces\": [self.get_keyspace()],\n \"org_id\": None,\n }\n )\n\n def _update_cloud_regions(self, build_config: dict, field_value: dict) -> dict:\n \"\"\"Update cloud provider regions in build config.\"\"\"\n env = self.environment\n cloud_provider = field_value[\"02_cloud_provider\"]\n\n # Update the region options based on the selected cloud provider\n template = build_config[\"database_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n template[\"03_region\"][\"options\"] = self.map_cloud_providers()[env][cloud_provider][\"regions\"]\n\n # Reset the the 03_region value if it's not in the new options\n if template[\"03_region\"][\"value\"] not in template[\"03_region\"][\"options\"]:\n template[\"03_region\"][\"value\"] = None\n\n return build_config\n\n async def _create_new_collection(self, build_config: dict, field_value: dict) -> None:\n \"\"\"Create a new collection and update build config options.\"\"\"\n embedding_provider = field_value.get(\"02_embedding_generation_provider\")\n try:\n await self.create_collection_api(\n new_collection_name=field_value[\"01_new_collection_name\"],\n token=self.token,\n api_endpoint=build_config[\"api_endpoint\"][\"value\"],\n environment=self.environment,\n keyspace=self.get_keyspace(),\n dimension=field_value.get(\"04_dimension\") if embedding_provider == \"Bring your own\" else None,\n embedding_generation_provider=embedding_provider,\n embedding_generation_model=field_value.get(\"03_embedding_generation_model\"),\n reranker=self.reranker,\n )\n except Exception as e:\n msg = f\"Error creating collection: {e}\"\n raise 
ValueError(msg) from e\n\n provider = embedding_provider.lower() if embedding_provider and embedding_provider != \"Bring your own\" else None\n build_config[\"collection_name\"].update(\n {\n \"value\": field_value[\"01_new_collection_name\"],\n \"options\": build_config[\"collection_name\"][\"options\"] + [field_value[\"01_new_collection_name\"]],\n }\n )\n build_config[\"embedding_model\"][\"show\"] = not bool(provider)\n build_config[\"embedding_model\"][\"required\"] = not bool(provider)\n build_config[\"collection_name\"][\"options_metadata\"].append(\n {\n \"records\": 0,\n \"provider\": provider,\n \"icon\": self.get_provider_icon(provider_name=provider),\n \"model\": field_value.get(\"03_embedding_generation_model\"),\n }\n )\n\n # Make sure we always show the reranker options if the collection is hybrid enabled\n # And right now they always are\n build_config[\"lexical_terms\"][\"show\"] = True\n\n def _handle_database_selection(self, build_config: dict, field_value: str) -> dict:\n \"\"\"Handle database selection and update related configurations.\"\"\"\n build_config = self.reset_database_list(build_config)\n\n # Reset collection list if database selection changes\n if field_value not in build_config[\"database_name\"][\"options\"]:\n build_config[\"database_name\"][\"value\"] = \"\"\n return build_config\n\n # Get the api endpoint for the selected database\n index = build_config[\"database_name\"][\"options\"].index(field_value)\n build_config[\"api_endpoint\"][\"value\"] = build_config[\"database_name\"][\"options_metadata\"][index][\"api_endpoint\"]\n\n # Get the org_id for the selected database\n org_id = build_config[\"database_name\"][\"options_metadata\"][index][\"org_id\"]\n if not org_id:\n return build_config\n\n # Update the list of keyspaces based on the db info\n build_config[\"keyspace\"][\"options\"] = build_config[\"database_name\"][\"options_metadata\"][index][\"keyspaces\"]\n build_config[\"keyspace\"][\"value\"] = (\n build_config[\"keyspace\"][\"options\"] and build_config[\"keyspace\"][\"options\"][0]\n if build_config[\"keyspace\"][\"value\"] not in build_config[\"keyspace\"][\"options\"]\n else build_config[\"keyspace\"][\"value\"]\n )\n\n # Get the database id for the selected database\n db_id = self.get_database_id_static(api_endpoint=build_config[\"api_endpoint\"][\"value\"])\n keyspace = self.get_keyspace()\n\n # Update the helper text for the embedding provider field\n template = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n template[\"02_embedding_generation_provider\"][\"helper_text\"] = (\n \"To create collections with more embedding provider options, go to \"\n f''\n \"your database in Astra DB.\"\n )\n\n # Reset provider options\n build_config = self.reset_provider_options(build_config)\n\n # Handle hybrid search options\n build_config = self._handle_hybrid_search_options(build_config)\n\n return self.reset_collection_list(build_config)\n\n def _handle_collection_selection(self, build_config: dict, field_value: str) -> dict:\n \"\"\"Handle collection selection and update embedding options.\"\"\"\n build_config[\"autodetect_collection\"][\"value\"] = True\n build_config = self.reset_collection_list(build_config)\n\n # Reset embedding model if collection selection changes\n if field_value and field_value not in build_config[\"collection_name\"][\"options\"]:\n build_config[\"collection_name\"][\"options\"].append(field_value)\n build_config[\"collection_name\"][\"options_metadata\"].append(\n 
{\n \"records\": 0,\n \"provider\": None,\n \"icon\": \"vectorstores\",\n \"model\": None,\n }\n )\n build_config[\"autodetect_collection\"][\"value\"] = False\n\n if not field_value:\n return build_config\n\n # Get the selected collection index\n index = build_config[\"collection_name\"][\"options\"].index(field_value)\n\n # Set the provider of the selected collection\n provider = build_config[\"collection_name\"][\"options_metadata\"][index][\"provider\"]\n build_config[\"embedding_model\"][\"show\"] = not bool(provider)\n build_config[\"embedding_model\"][\"required\"] = not bool(provider)\n\n # Grab the collection object\n database = self.get_database_object(api_endpoint=build_config[\"api_endpoint\"][\"value\"])\n collection = database.get_collection(\n name=field_value,\n keyspace=build_config[\"keyspace\"][\"value\"],\n )\n\n # Check if hybrid and lexical are enabled\n col_options = collection.options()\n hyb_enabled = col_options.rerank and col_options.rerank.enabled\n lex_enabled = col_options.lexical and col_options.lexical.enabled\n user_hyb_enabled = build_config[\"search_method\"][\"value\"] == \"Hybrid Search\"\n\n # Show lexical terms if the collection is hybrid enabled\n build_config[\"lexical_terms\"][\"show\"] = hyb_enabled and lex_enabled and user_hyb_enabled\n\n return build_config\n\n @check_cached_vector_store\n def build_vector_store(self):\n try:\n from langchain_astradb import AstraDBVectorStore\n except ImportError as e:\n msg = (\n \"Could not import langchain Astra DB integration package. \"\n \"Please install it with `pip install langchain-astradb`.\"\n )\n raise ImportError(msg) from e\n\n # Get the embedding model and additional params\n embedding_params = {\"embedding\": self.embedding_model} if self.embedding_model else {}\n\n # Get the additional parameters\n additional_params = self.astradb_vectorstore_kwargs or {}\n\n # Get Langflow version and platform information\n __version__ = get_version_info()[\"version\"]\n langflow_prefix = \"\"\n # if os.getenv(\"AWS_EXECUTION_ENV\") == \"AWS_ECS_FARGATE\": # TODO: More precise way of detecting\n # langflow_prefix = \"ds-\"\n\n # Get the database object\n database = self.get_database_object()\n autodetect = self.collection_name in database.list_collection_names() and self.autodetect_collection\n\n # Bundle up the auto-detect parameters\n autodetect_params = {\n \"autodetect_collection\": autodetect,\n \"content_field\": (\n self.content_field\n if self.content_field and embedding_params\n else (\n \"page_content\"\n if embedding_params\n and self.collection_data(collection_name=self.collection_name, database=database) == 0\n else None\n )\n ),\n \"ignore_invalid_documents\": self.ignore_invalid_documents,\n }\n\n # Choose HybridSearchMode based on the selected param\n hybrid_search_mode = HybridSearchMode.DEFAULT if self.search_method == \"Hybrid Search\" else HybridSearchMode.OFF\n\n # Attempt to build the Vector Store object\n try:\n vector_store = AstraDBVectorStore(\n # Astra DB Authentication Parameters\n token=self.token,\n api_endpoint=database.api_endpoint,\n namespace=database.keyspace,\n collection_name=self.collection_name,\n environment=self.environment,\n # Hybrid Search Parameters\n hybrid_search=hybrid_search_mode,\n # Astra DB Usage Tracking Parameters\n ext_callers=[(f\"{langflow_prefix}langflow\", __version__)],\n # Astra DB Vector Store Parameters\n **autodetect_params,\n **embedding_params,\n **additional_params,\n )\n except Exception as e:\n msg = f\"Error initializing AstraDBVectorStore: 
{e}\"\n raise ValueError(msg) from e\n\n # Add documents to the vector store\n self._add_documents_to_vector_store(vector_store)\n\n return vector_store\n\n def _add_documents_to_vector_store(self, vector_store) -> None:\n self.ingest_data = self._prepare_ingest_data()\n\n documents = []\n for _input in self.ingest_data or []:\n if isinstance(_input, Data):\n documents.append(_input.to_lc_document())\n else:\n msg = \"Vector Store Inputs must be Data objects.\"\n raise TypeError(msg)\n\n if documents and self.deletion_field:\n self.log(f\"Deleting documents where {self.deletion_field}\")\n try:\n database = self.get_database_object()\n collection = database.get_collection(self.collection_name, keyspace=database.keyspace)\n delete_values = list({doc.metadata[self.deletion_field] for doc in documents})\n self.log(f\"Deleting documents where {self.deletion_field} matches {delete_values}.\")\n collection.delete_many({f\"metadata.{self.deletion_field}\": {\"$in\": delete_values}})\n except Exception as e:\n msg = f\"Error deleting documents from AstraDBVectorStore based on '{self.deletion_field}': {e}\"\n raise ValueError(msg) from e\n\n if documents:\n self.log(f\"Adding {len(documents)} documents to the Vector Store.\")\n try:\n vector_store.add_documents(documents)\n except Exception as e:\n msg = f\"Error adding documents to AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n else:\n self.log(\"No documents to add to the Vector Store.\")\n\n def _map_search_type(self) -> str:\n search_type_mapping = {\n \"Similarity with score threshold\": \"similarity_score_threshold\",\n \"MMR (Max Marginal Relevance)\": \"mmr\",\n }\n\n return search_type_mapping.get(self.search_type, \"similarity\")\n\n def _build_search_args(self):\n # Clean up the search query\n query = self.search_query if isinstance(self.search_query, str) and self.search_query.strip() else None\n lexical_terms = self.lexical_terms or None\n\n # Check if we have a search query, and if so set the args\n if query:\n args = {\n \"query\": query,\n \"search_type\": self._map_search_type(),\n \"k\": self.number_of_results,\n \"score_threshold\": self.search_score_threshold,\n \"lexical_query\": lexical_terms,\n }\n elif self.advanced_search_filter:\n args = {\n \"n\": self.number_of_results,\n }\n else:\n return {}\n\n filter_arg = self.advanced_search_filter or {}\n if filter_arg:\n args[\"filter\"] = filter_arg\n\n return args\n\n def search_documents(self, vector_store=None) -> list[Data]:\n vector_store = vector_store or self.build_vector_store()\n\n self.log(f\"Search input: {self.search_query}\")\n self.log(f\"Search type: {self.search_type}\")\n self.log(f\"Number of results: {self.number_of_results}\")\n self.log(f\"store.hybrid_search: {vector_store.hybrid_search}\")\n self.log(f\"Lexical terms: {self.lexical_terms}\")\n self.log(f\"Reranker: {self.reranker}\")\n\n try:\n search_args = self._build_search_args()\n except Exception as e:\n msg = f\"Error in AstraDBVectorStore._build_search_args: {e}\"\n raise ValueError(msg) from e\n\n if not search_args:\n self.log(\"No search input or filters provided. 
Skipping search.\")\n return []\n\n docs = []\n search_method = \"search\" if \"query\" in search_args else \"metadata_search\"\n\n try:\n self.log(f\"Calling vector_store.{search_method} with args: {search_args}\")\n docs = getattr(vector_store, search_method)(**search_args)\n except Exception as e:\n msg = f\"Error performing {search_method} in AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n\n self.log(f\"Retrieved documents: {len(docs)}\")\n\n data = docs_to_data(docs)\n self.log(f\"Converted documents to data: {len(data)}\")\n self.status = data\n\n return data\n\n def get_retriever_kwargs(self):\n search_args = self._build_search_args()\n\n return {\n \"search_type\": self._map_search_type(),\n \"search_kwargs\": search_args,\n }\n"
+ "value": "import re\nfrom collections import defaultdict\nfrom dataclasses import asdict, dataclass, field\n\nfrom astrapy import DataAPIClient, Database\nfrom astrapy.data.info.reranking import RerankServiceOptions\nfrom astrapy.info import CollectionDescriptor, CollectionLexicalOptions, CollectionRerankOptions\nfrom langchain_astradb import AstraDBVectorStore, VectorServiceOptions\nfrom langchain_astradb.utils.astradb import HybridSearchMode, _AstraDBCollectionEnvironment\n\nfrom langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom langflow.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom langflow.helpers.data import docs_to_data\nfrom langflow.inputs.inputs import FloatInput, NestedDictInput\nfrom langflow.io import (\n BoolInput,\n DropdownInput,\n HandleInput,\n IntInput,\n QueryInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema.data import Data\nfrom langflow.utils.version import get_version_info\n\n\n@vector_store_connection\nclass AstraDBVectorStoreComponent(LCVectorStoreComponent):\n display_name: str = \"Astra DB\"\n description: str = \"Ingest and search documents in Astra DB\"\n documentation: str = \"https://docs.datastax.com/en/langflow/astra-components.html\"\n name = \"AstraDB\"\n icon: str = \"AstraDB\"\n\n _cached_vector_store: AstraDBVectorStore | None = None\n\n @dataclass\n class NewDatabaseInput:\n functionality: str = \"create\"\n fields: dict[str, dict] = field(\n default_factory=lambda: {\n \"data\": {\n \"node\": {\n \"name\": \"create_database\",\n \"description\": \"Please allow several minutes for creation to complete.\",\n \"display_name\": \"Create new database\",\n \"field_order\": [\"01_new_database_name\", \"02_cloud_provider\", \"03_region\"],\n \"template\": {\n \"01_new_database_name\": StrInput(\n name=\"new_database_name\",\n display_name=\"Name\",\n info=\"Name of the new database to create in Astra DB.\",\n required=True,\n ),\n \"02_cloud_provider\": DropdownInput(\n name=\"cloud_provider\",\n display_name=\"Cloud provider\",\n info=\"Cloud provider for the new database.\",\n options=[],\n required=True,\n real_time_refresh=True,\n ),\n \"03_region\": DropdownInput(\n name=\"region\",\n display_name=\"Region\",\n info=\"Region for the new database.\",\n options=[],\n required=True,\n ),\n },\n },\n }\n }\n )\n\n @dataclass\n class NewCollectionInput:\n functionality: str = \"create\"\n fields: dict[str, dict] = field(\n default_factory=lambda: {\n \"data\": {\n \"node\": {\n \"name\": \"create_collection\",\n \"description\": \"Please allow several seconds for creation to complete.\",\n \"display_name\": \"Create new collection\",\n \"field_order\": [\n \"01_new_collection_name\",\n \"02_embedding_generation_provider\",\n \"03_embedding_generation_model\",\n \"04_dimension\",\n ],\n \"template\": {\n \"01_new_collection_name\": StrInput(\n name=\"new_collection_name\",\n display_name=\"Name\",\n info=\"Name of the new collection to create in Astra DB.\",\n required=True,\n ),\n \"02_embedding_generation_provider\": DropdownInput(\n name=\"embedding_generation_provider\",\n display_name=\"Embedding generation method\",\n info=\"Provider to use for generating embeddings.\",\n helper_text=(\n \"To create collections with more embedding provider options, go to \"\n 'your database in Astra DB'\n ),\n real_time_refresh=True,\n required=True,\n options=[],\n ),\n \"03_embedding_generation_model\": DropdownInput(\n name=\"embedding_generation_model\",\n 
display_name=\"Embedding model\",\n info=\"Model to use for generating embeddings.\",\n real_time_refresh=True,\n options=[],\n ),\n \"04_dimension\": IntInput(\n name=\"dimension\",\n display_name=\"Dimensions\",\n info=\"Dimensions of the embeddings to generate.\",\n value=None,\n ),\n },\n },\n }\n }\n )\n\n inputs = [\n SecretStrInput(\n name=\"token\",\n display_name=\"Astra DB Application Token\",\n info=\"Authentication token for accessing Astra DB.\",\n value=\"ASTRA_DB_APPLICATION_TOKEN\",\n required=True,\n real_time_refresh=True,\n input_types=[],\n ),\n DropdownInput(\n name=\"environment\",\n display_name=\"Environment\",\n info=\"The environment for the Astra DB API Endpoint.\",\n options=[\"prod\", \"test\", \"dev\"],\n value=\"prod\",\n advanced=True,\n real_time_refresh=True,\n combobox=True,\n ),\n DropdownInput(\n name=\"database_name\",\n display_name=\"Database\",\n info=\"The Database name for the Astra DB instance.\",\n required=True,\n refresh_button=True,\n real_time_refresh=True,\n dialog_inputs=asdict(NewDatabaseInput()),\n combobox=True,\n ),\n StrInput(\n name=\"api_endpoint\",\n display_name=\"Astra DB API Endpoint\",\n info=\"The API Endpoint for the Astra DB instance. Supercedes database selection.\",\n show=False,\n ),\n DropdownInput(\n name=\"keyspace\",\n display_name=\"Keyspace\",\n info=\"Optional keyspace within Astra DB to use for the collection.\",\n advanced=True,\n options=[],\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"collection_name\",\n display_name=\"Collection\",\n info=\"The name of the collection within Astra DB where the vectors will be stored.\",\n required=True,\n refresh_button=True,\n real_time_refresh=True,\n dialog_inputs=asdict(NewCollectionInput()),\n combobox=True,\n show=False,\n ),\n HandleInput(\n name=\"embedding_model\",\n display_name=\"Embedding Model\",\n input_types=[\"Embeddings\"],\n info=\"Specify the Embedding Model. 
Not required for Astra Vectorize collections.\",\n required=False,\n show=False,\n ),\n *LCVectorStoreComponent.inputs,\n DropdownInput(\n name=\"search_method\",\n display_name=\"Search Method\",\n info=(\n \"Determine how your content is matched: Vector finds semantic similarity, \"\n \"and Hybrid Search (suggested) combines both approaches \"\n \"with a reranker.\"\n ),\n options=[\"Hybrid Search\", \"Vector Search\"], # TODO: Restore Lexical Search?\n options_metadata=[{\"icon\": \"SearchHybrid\"}, {\"icon\": \"SearchVector\"}],\n value=\"Vector Search\",\n advanced=True,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"reranker\",\n display_name=\"Reranker\",\n info=\"Post-retrieval model that re-scores results for optimal relevance ranking.\",\n show=False,\n toggle=True,\n ),\n QueryInput(\n name=\"lexical_terms\",\n display_name=\"Lexical Terms\",\n info=\"Add additional terms/keywords to augment search precision.\",\n placeholder=\"Enter terms to search...\",\n separator=\" \",\n show=False,\n value=\"\",\n advanced=True,\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Number of Search Results\",\n info=\"Number of search results to return.\",\n advanced=True,\n value=4,\n ),\n DropdownInput(\n name=\"search_type\",\n display_name=\"Search Type\",\n info=\"Search type to use\",\n options=[\"Similarity\", \"Similarity with score threshold\", \"MMR (Max Marginal Relevance)\"],\n value=\"Similarity\",\n advanced=True,\n ),\n FloatInput(\n name=\"search_score_threshold\",\n display_name=\"Search Score Threshold\",\n info=\"Minimum similarity score threshold for search results. \"\n \"(when using 'Similarity with score threshold')\",\n value=0,\n advanced=True,\n ),\n NestedDictInput(\n name=\"advanced_search_filter\",\n display_name=\"Search Metadata Filter\",\n info=\"Optional dictionary of filters to apply to the search query.\",\n advanced=True,\n ),\n BoolInput(\n name=\"autodetect_collection\",\n display_name=\"Autodetect Collection\",\n info=\"Boolean flag to determine whether to autodetect the collection.\",\n advanced=True,\n value=True,\n ),\n StrInput(\n name=\"content_field\",\n display_name=\"Content Field\",\n info=\"Field to use as the text content field for the vector store.\",\n advanced=True,\n ),\n StrInput(\n name=\"deletion_field\",\n display_name=\"Deletion Based On Field\",\n info=\"When this parameter is provided, documents in the target collection with \"\n \"metadata field values matching the input metadata field value will be deleted \"\n \"before new data is loaded.\",\n advanced=True,\n ),\n BoolInput(\n name=\"ignore_invalid_documents\",\n display_name=\"Ignore Invalid Documents\",\n info=\"Boolean flag to determine whether to ignore invalid documents at runtime.\",\n advanced=True,\n ),\n NestedDictInput(\n name=\"astradb_vectorstore_kwargs\",\n display_name=\"AstraDBVectorStore Parameters\",\n info=\"Optional dictionary of additional parameters for the AstraDBVectorStore.\",\n advanced=True,\n ),\n ]\n\n @classmethod\n def map_cloud_providers(cls):\n # TODO: Programmatically fetch the regions for each cloud provider\n return {\n \"dev\": {\n \"Amazon Web Services\": {\n \"id\": \"aws\",\n \"regions\": [\"us-west-2\"],\n },\n \"Google Cloud Platform\": {\n \"id\": \"gcp\",\n \"regions\": [\"us-central1\", \"europe-west4\"],\n },\n },\n \"test\": {\n \"Google Cloud Platform\": {\n \"id\": \"gcp\",\n \"regions\": [\"us-central1\"],\n },\n },\n \"prod\": {\n \"Amazon Web Services\": {\n \"id\": \"aws\",\n \"regions\": [\"us-east-2\", 
\"ap-south-1\", \"eu-west-1\"],\n },\n \"Google Cloud Platform\": {\n \"id\": \"gcp\",\n \"regions\": [\"us-east1\"],\n },\n \"Microsoft Azure\": {\n \"id\": \"azure\",\n \"regions\": [\"westus3\"],\n },\n },\n }\n\n @classmethod\n def get_vectorize_providers(cls, token: str, environment: str | None = None, api_endpoint: str | None = None):\n try:\n # Get the admin object\n client = DataAPIClient(environment=environment)\n admin_client = client.get_admin()\n db_admin = admin_client.get_database_admin(api_endpoint, token=token)\n\n # Get the list of embedding providers\n embedding_providers = db_admin.find_embedding_providers()\n\n vectorize_providers_mapping = {}\n # Map the provider display name to the provider key and models\n for provider_key, provider_data in embedding_providers.embedding_providers.items():\n # Get the provider display name and models\n display_name = provider_data.display_name\n models = [model.name for model in provider_data.models]\n\n # Build our mapping\n vectorize_providers_mapping[display_name] = [provider_key, models]\n\n # Sort the resulting dictionary\n return defaultdict(list, dict(sorted(vectorize_providers_mapping.items())))\n except Exception as _: # noqa: BLE001\n return {}\n\n @classmethod\n async def create_database_api(\n cls,\n new_database_name: str,\n cloud_provider: str,\n region: str,\n token: str,\n environment: str | None = None,\n keyspace: str | None = None,\n ):\n client = DataAPIClient(environment=environment)\n\n # Get the admin object\n admin_client = client.get_admin(token=token)\n\n # Get the environment, set to prod if null like\n my_env = environment or \"prod\"\n\n # Raise a value error if name isn't provided\n if not new_database_name:\n msg = \"Database name is required to create a new database.\"\n raise ValueError(msg)\n\n # Call the create database function\n return await admin_client.async_create_database(\n name=new_database_name,\n cloud_provider=cls.map_cloud_providers()[my_env][cloud_provider][\"id\"],\n region=region,\n keyspace=keyspace,\n wait_until_active=False,\n )\n\n @classmethod\n async def create_collection_api(\n cls,\n new_collection_name: str,\n token: str,\n api_endpoint: str,\n environment: str | None = None,\n keyspace: str | None = None,\n dimension: int | None = None,\n embedding_generation_provider: str | None = None,\n embedding_generation_model: str | None = None,\n reranker: str | None = None,\n ):\n # Build vectorize options, if needed\n vectorize_options = None\n if not dimension:\n providers = cls.get_vectorize_providers(token=token, environment=environment, api_endpoint=api_endpoint)\n vectorize_options = VectorServiceOptions(\n provider=providers.get(embedding_generation_provider, [None, []])[0],\n model_name=embedding_generation_model,\n )\n\n # Raise a value error if name isn't provided\n if not new_collection_name:\n msg = \"Collection name is required to create a new collection.\"\n raise ValueError(msg)\n\n # Define the base arguments being passed to the create collection function\n base_args = {\n \"collection_name\": new_collection_name,\n \"token\": token,\n \"api_endpoint\": api_endpoint,\n \"keyspace\": keyspace,\n \"environment\": environment,\n \"embedding_dimension\": dimension,\n \"collection_vector_service_options\": vectorize_options,\n }\n\n # Add optional arguments if the reranker is set\n if reranker:\n # Split the reranker field into a provider a model name\n provider, _ = reranker.split(\"/\")\n base_args[\"collection_rerank\"] = CollectionRerankOptions(\n 
service=RerankServiceOptions(provider=provider, model_name=reranker),\n )\n base_args[\"collection_lexical\"] = CollectionLexicalOptions(analyzer=\"STANDARD\")\n\n _AstraDBCollectionEnvironment(**base_args)\n\n @classmethod\n def get_database_list_static(cls, token: str, environment: str | None = None):\n client = DataAPIClient(environment=environment)\n\n # Get the admin object\n admin_client = client.get_admin(token=token)\n\n # Get the list of databases\n db_list = admin_client.list_databases()\n\n # Generate the api endpoint for each database\n db_info_dict = {}\n for db in db_list:\n try:\n # Get the API endpoint for the database\n api_endpoint = db.regions[0].api_endpoint\n\n # Get the number of collections\n try:\n # Get the number of collections in the database\n num_collections = len(\n client.get_database(\n api_endpoint,\n token=token,\n ).list_collection_names()\n )\n except Exception: # noqa: BLE001\n if db.status != \"PENDING\":\n continue\n num_collections = 0\n\n # Add the database to the dictionary\n db_info_dict[db.name] = {\n \"api_endpoint\": api_endpoint,\n \"keyspaces\": db.keyspaces,\n \"collections\": num_collections,\n \"status\": db.status if db.status != \"ACTIVE\" else None,\n \"org_id\": db.org_id if db.org_id else None,\n }\n except Exception: # noqa: BLE001, S110\n pass\n\n return db_info_dict\n\n def get_database_list(self):\n return self.get_database_list_static(\n token=self.token,\n environment=self.environment,\n )\n\n @classmethod\n def get_api_endpoint_static(\n cls,\n token: str,\n environment: str | None = None,\n api_endpoint: str | None = None,\n database_name: str | None = None,\n ):\n # If the api_endpoint is set, return it\n if api_endpoint:\n return api_endpoint\n\n # Check if the database_name is like a url\n if database_name and database_name.startswith(\"https://\"):\n return database_name\n\n # If the database is not set, nothing we can do.\n if not database_name:\n return None\n\n # Grab the database object\n db = cls.get_database_list_static(token=token, environment=environment).get(database_name)\n if not db:\n return None\n\n # Otherwise, get the URL from the database list\n return db.get(\"api_endpoint\")\n\n def get_api_endpoint(self):\n return self.get_api_endpoint_static(\n token=self.token,\n environment=self.environment,\n api_endpoint=self.api_endpoint,\n database_name=self.database_name,\n )\n\n @classmethod\n def get_database_id_static(cls, api_endpoint: str) -> str | None:\n # Pattern matches standard UUID format: 8-4-4-4-12 hexadecimal characters\n uuid_pattern = r\"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}\"\n match = re.search(uuid_pattern, api_endpoint)\n\n return match.group(0) if match else None\n\n def get_database_id(self):\n return self.get_database_id_static(api_endpoint=self.get_api_endpoint())\n\n def get_keyspace(self):\n keyspace = self.keyspace\n\n if keyspace:\n return keyspace.strip()\n\n return \"default_keyspace\"\n\n def get_database_object(self, api_endpoint: str | None = None):\n try:\n client = DataAPIClient(environment=self.environment)\n\n return client.get_database(\n api_endpoint or self.get_api_endpoint(),\n token=self.token,\n keyspace=self.get_keyspace(),\n )\n except Exception as e:\n msg = f\"Error fetching database object: {e}\"\n raise ValueError(msg) from e\n\n def collection_data(self, collection_name: str, database: Database | None = None):\n try:\n if not database:\n client = DataAPIClient(environment=self.environment)\n\n database = client.get_database(\n 
self.get_api_endpoint(),\n token=self.token,\n keyspace=self.get_keyspace(),\n )\n\n collection = database.get_collection(collection_name)\n\n return collection.estimated_document_count()\n except Exception as e: # noqa: BLE001\n self.log(f\"Error checking collection data: {e}\")\n\n return None\n\n def _initialize_database_options(self):\n try:\n return [\n {\n \"name\": name,\n \"status\": info[\"status\"],\n \"collections\": info[\"collections\"],\n \"api_endpoint\": info[\"api_endpoint\"],\n \"keyspaces\": info[\"keyspaces\"],\n \"org_id\": info[\"org_id\"],\n }\n for name, info in self.get_database_list().items()\n ]\n except Exception as e:\n msg = f\"Error fetching database options: {e}\"\n raise ValueError(msg) from e\n\n @classmethod\n def get_provider_icon(cls, collection: CollectionDescriptor | None = None, provider_name: str | None = None) -> str:\n # Get the provider name from the collection\n provider_name = provider_name or (\n collection.definition.vector.service.provider\n if (\n collection\n and collection.definition\n and collection.definition.vector\n and collection.definition.vector.service\n )\n else None\n )\n\n # If there is no provider, use the vector store icon\n if not provider_name or provider_name.lower() == \"bring your own\":\n return \"vectorstores\"\n\n # Map provider casings\n case_map = {\n \"nvidia\": \"NVIDIA\",\n \"openai\": \"OpenAI\",\n \"amazon bedrock\": \"AmazonBedrockEmbeddings\",\n \"azure openai\": \"AzureOpenAiEmbeddings\",\n \"cohere\": \"Cohere\",\n \"jina ai\": \"JinaAI\",\n \"mistral ai\": \"MistralAI\",\n \"upstage\": \"Upstage\",\n \"voyage ai\": \"VoyageAI\",\n }\n\n # Adjust the casing on some like nvidia\n return case_map[provider_name.lower()] if provider_name.lower() in case_map else provider_name.title()\n\n def _initialize_collection_options(self, api_endpoint: str | None = None):\n # Nothing to generate if we don't have an API endpoint yet\n api_endpoint = api_endpoint or self.get_api_endpoint()\n if not api_endpoint:\n return []\n\n # Retrieve the database object\n database = self.get_database_object(api_endpoint=api_endpoint)\n\n # Get the list of collections\n collection_list = database.list_collections(keyspace=self.get_keyspace())\n\n # Return the list of collections and metadata associated\n return [\n {\n \"name\": col.name,\n \"records\": self.collection_data(collection_name=col.name, database=database),\n \"provider\": (\n col.definition.vector.service.provider\n if col.definition.vector and col.definition.vector.service\n else None\n ),\n \"icon\": self.get_provider_icon(collection=col),\n \"model\": (\n col.definition.vector.service.model_name\n if col.definition.vector and col.definition.vector.service\n else None\n ),\n }\n for col in collection_list\n ]\n\n def reset_provider_options(self, build_config: dict) -> dict:\n \"\"\"Reset provider options and related configurations in the build_config dictionary.\"\"\"\n # Extract template path for cleaner access\n template = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n\n # Get vectorize providers\n vectorize_providers_api = self.get_vectorize_providers(\n token=self.token,\n environment=self.environment,\n api_endpoint=build_config[\"api_endpoint\"][\"value\"],\n )\n\n # Create a new dictionary with \"Bring your own\" first\n vectorize_providers: dict[str, list[list[str]]] = {\"Bring your own\": [[], []]}\n\n # Add the remaining items (only Nvidia) from the original dictionary\n vectorize_providers.update(\n {\n k: 
v\n for k, v in vectorize_providers_api.items()\n if k.lower() in [\"nvidia\"] # TODO: Eventually support more\n }\n )\n\n # Set provider options\n provider_field = \"02_embedding_generation_provider\"\n template[provider_field][\"options\"] = list(vectorize_providers.keys())\n\n # Add metadata for each provider option\n template[provider_field][\"options_metadata\"] = [\n {\"icon\": self.get_provider_icon(provider_name=provider)} for provider in template[provider_field][\"options\"]\n ]\n\n # Get selected embedding provider\n embedding_provider = template[provider_field][\"value\"]\n is_bring_your_own = embedding_provider and embedding_provider == \"Bring your own\"\n\n # Configure embedding model field\n model_field = \"03_embedding_generation_model\"\n template[model_field].update(\n {\n \"options\": vectorize_providers.get(embedding_provider, [[], []])[1],\n \"placeholder\": \"Bring your own\" if is_bring_your_own else None,\n \"readonly\": is_bring_your_own,\n \"required\": not is_bring_your_own,\n \"value\": None,\n }\n )\n\n # If this is a bring your own, set dimensions to 0\n return self.reset_dimension_field(build_config)\n\n def reset_dimension_field(self, build_config: dict) -> dict:\n \"\"\"Reset dimension field options based on provided configuration.\"\"\"\n # Extract template path for cleaner access\n template = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n\n # Get selected embedding model\n provider_field = \"02_embedding_generation_provider\"\n embedding_provider = template[provider_field][\"value\"]\n is_bring_your_own = embedding_provider and embedding_provider == \"Bring your own\"\n\n # Configure dimension field\n dimension_field = \"04_dimension\"\n dimension_value = 1024 if not is_bring_your_own else None # TODO: Dynamically figure this out\n template[dimension_field].update(\n {\n \"placeholder\": dimension_value,\n \"value\": dimension_value,\n \"readonly\": not is_bring_your_own,\n \"required\": is_bring_your_own,\n }\n )\n\n return build_config\n\n def reset_collection_list(self, build_config: dict) -> dict:\n \"\"\"Reset collection list options based on provided configuration.\"\"\"\n # Get collection options\n collection_options = self._initialize_collection_options(api_endpoint=build_config[\"api_endpoint\"][\"value\"])\n # Update collection configuration\n collection_config = build_config[\"collection_name\"]\n collection_config.update(\n {\n \"options\": [col[\"name\"] for col in collection_options],\n \"options_metadata\": [{k: v for k, v in col.items() if k != \"name\"} for col in collection_options],\n }\n )\n\n # Reset selected collection if not in options\n if collection_config[\"value\"] not in collection_config[\"options\"]:\n collection_config[\"value\"] = \"\"\n\n # Set advanced status based on database selection\n collection_config[\"show\"] = bool(build_config[\"database_name\"][\"value\"])\n\n return build_config\n\n def reset_database_list(self, build_config: dict) -> dict:\n \"\"\"Reset database list options and related configurations.\"\"\"\n # Get database options\n database_options = self._initialize_database_options()\n\n # Update cloud provider options\n env = self.environment\n template = build_config[\"database_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n template[\"02_cloud_provider\"][\"options\"] = list(self.map_cloud_providers()[env].keys())\n\n # Update database configuration\n database_config = build_config[\"database_name\"]\n 
database_config.update(\n {\n \"options\": [db[\"name\"] for db in database_options],\n \"options_metadata\": [{k: v for k, v in db.items() if k != \"name\"} for db in database_options],\n }\n )\n\n # Reset selections if value not in options\n if database_config[\"value\"] not in database_config[\"options\"]:\n database_config[\"value\"] = \"\"\n build_config[\"api_endpoint\"][\"value\"] = \"\"\n build_config[\"collection_name\"][\"show\"] = False\n\n # Set advanced status based on token presence\n database_config[\"show\"] = bool(build_config[\"token\"][\"value\"])\n\n return build_config\n\n def reset_build_config(self, build_config: dict) -> dict:\n \"\"\"Reset all build configuration options to default empty state.\"\"\"\n # Reset database configuration\n database_config = build_config[\"database_name\"]\n database_config.update({\"options\": [], \"options_metadata\": [], \"value\": \"\", \"show\": False})\n build_config[\"api_endpoint\"][\"value\"] = \"\"\n\n # Reset collection configuration\n collection_config = build_config[\"collection_name\"]\n collection_config.update({\"options\": [], \"options_metadata\": [], \"value\": \"\", \"show\": False})\n\n return build_config\n\n def _handle_hybrid_search_options(self, build_config: dict) -> dict:\n \"\"\"Set hybrid search options in the build configuration.\"\"\"\n # Detect what hybrid options are available\n # Get the admin object\n client = DataAPIClient(environment=self.environment)\n admin_client = client.get_admin()\n db_admin = admin_client.get_database_admin(self.get_api_endpoint(), token=self.token)\n\n # We will try to get the reranking providers to see if its hybrid emabled\n try:\n providers = db_admin.find_reranking_providers()\n build_config[\"reranker\"][\"options\"] = [\n model.name for provider_data in providers.reranking_providers.values() for model in provider_data.models\n ]\n build_config[\"reranker\"][\"options_metadata\"] = [\n {\"icon\": self.get_provider_icon(provider_name=model.name.split(\"/\")[0])}\n for provider in providers.reranking_providers.values()\n for model in provider.models\n ]\n build_config[\"reranker\"][\"value\"] = build_config[\"reranker\"][\"options\"][0]\n\n # Set the default search field to hybrid search\n build_config[\"search_method\"][\"show\"] = True\n build_config[\"search_method\"][\"options\"] = [\"Hybrid Search\", \"Vector Search\"]\n build_config[\"search_method\"][\"value\"] = \"Hybrid Search\"\n except Exception as _: # noqa: BLE001\n build_config[\"reranker\"][\"options\"] = []\n build_config[\"reranker\"][\"options_metadata\"] = []\n\n # Set the default search field to vector search\n build_config[\"search_method\"][\"show\"] = False\n build_config[\"search_method\"][\"options\"] = [\"Vector Search\"]\n build_config[\"search_method\"][\"value\"] = \"Vector Search\"\n\n # Set reranker and lexical terms options based on search method\n build_config[\"reranker\"][\"toggle_value\"] = True\n build_config[\"reranker\"][\"show\"] = build_config[\"search_method\"][\"value\"] == \"Hybrid Search\"\n build_config[\"reranker\"][\"toggle_disable\"] = build_config[\"search_method\"][\"value\"] == \"Hybrid Search\"\n if build_config[\"reranker\"][\"show\"]:\n build_config[\"search_type\"][\"value\"] = \"Similarity\"\n\n return build_config\n\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Update build configuration based on field name and value.\"\"\"\n # Early return if no token provided\n if not self.token:\n return 
self.reset_build_config(build_config)\n\n # Database creation callback\n if field_name == \"database_name\" and isinstance(field_value, dict):\n if \"01_new_database_name\" in field_value:\n await self._create_new_database(build_config, field_value)\n return self.reset_collection_list(build_config)\n return self._update_cloud_regions(build_config, field_value)\n\n # Collection creation callback\n if field_name == \"collection_name\" and isinstance(field_value, dict):\n # Case 1: New collection creation\n if \"01_new_collection_name\" in field_value:\n await self._create_new_collection(build_config, field_value)\n return build_config\n\n # Case 2: Update embedding provider options\n if \"02_embedding_generation_provider\" in field_value:\n return self.reset_provider_options(build_config)\n\n # Case 3: Update dimension field\n if \"03_embedding_generation_model\" in field_value:\n return self.reset_dimension_field(build_config)\n\n # Initial execution or token/environment change\n first_run = field_name == \"collection_name\" and not field_value and not build_config[\"database_name\"][\"options\"]\n if first_run or field_name in {\"token\", \"environment\"}:\n return self.reset_database_list(build_config)\n\n # Database selection change\n if field_name == \"database_name\" and not isinstance(field_value, dict):\n return self._handle_database_selection(build_config, field_value)\n\n # Keyspace selection change\n if field_name == \"keyspace\":\n return self.reset_collection_list(build_config)\n\n # Collection selection change\n if field_name == \"collection_name\" and not isinstance(field_value, dict):\n return self._handle_collection_selection(build_config, field_value)\n\n # Search method selection change\n if field_name == \"search_method\":\n is_vector_search = field_value == \"Vector Search\"\n is_autodetect = build_config[\"autodetect_collection\"][\"value\"]\n\n # Configure lexical terms (same for both cases)\n build_config[\"lexical_terms\"][\"show\"] = not is_vector_search\n build_config[\"lexical_terms\"][\"value\"] = \"\" if is_vector_search else build_config[\"lexical_terms\"][\"value\"]\n\n # Disable reranker disabling if hybrid search is selected\n build_config[\"reranker\"][\"toggle_disable\"] = not is_vector_search\n build_config[\"reranker\"][\"toggle_value\"] = True\n build_config[\"reranker\"][\"value\"] = build_config[\"reranker\"][\"options\"][0]\n\n # Toggle search type and score threshold based on search method\n build_config[\"search_type\"][\"show\"] = is_vector_search\n build_config[\"search_score_threshold\"][\"show\"] = is_vector_search\n\n # Make sure the search_type is set to \"Similarity\"\n if not is_vector_search or is_autodetect:\n build_config[\"search_type\"][\"value\"] = \"Similarity\"\n\n return build_config\n\n async def _create_new_database(self, build_config: dict, field_value: dict) -> None:\n \"\"\"Create a new database and update build config options.\"\"\"\n try:\n await self.create_database_api(\n new_database_name=field_value[\"01_new_database_name\"],\n token=self.token,\n keyspace=self.get_keyspace(),\n environment=self.environment,\n cloud_provider=field_value[\"02_cloud_provider\"],\n region=field_value[\"03_region\"],\n )\n except Exception as e:\n msg = f\"Error creating database: {e}\"\n raise ValueError(msg) from e\n\n build_config[\"database_name\"][\"options\"].append(field_value[\"01_new_database_name\"])\n build_config[\"database_name\"][\"options_metadata\"].append(\n {\n \"status\": \"PENDING\",\n \"collections\": 0,\n 
\"api_endpoint\": None,\n \"keyspaces\": [self.get_keyspace()],\n \"org_id\": None,\n }\n )\n\n def _update_cloud_regions(self, build_config: dict, field_value: dict) -> dict:\n \"\"\"Update cloud provider regions in build config.\"\"\"\n env = self.environment\n cloud_provider = field_value[\"02_cloud_provider\"]\n\n # Update the region options based on the selected cloud provider\n template = build_config[\"database_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n template[\"03_region\"][\"options\"] = self.map_cloud_providers()[env][cloud_provider][\"regions\"]\n\n # Reset the the 03_region value if it's not in the new options\n if template[\"03_region\"][\"value\"] not in template[\"03_region\"][\"options\"]:\n template[\"03_region\"][\"value\"] = None\n\n return build_config\n\n async def _create_new_collection(self, build_config: dict, field_value: dict) -> None:\n \"\"\"Create a new collection and update build config options.\"\"\"\n embedding_provider = field_value.get(\"02_embedding_generation_provider\")\n try:\n await self.create_collection_api(\n new_collection_name=field_value[\"01_new_collection_name\"],\n token=self.token,\n api_endpoint=build_config[\"api_endpoint\"][\"value\"],\n environment=self.environment,\n keyspace=self.get_keyspace(),\n dimension=field_value.get(\"04_dimension\") if embedding_provider == \"Bring your own\" else None,\n embedding_generation_provider=embedding_provider,\n embedding_generation_model=field_value.get(\"03_embedding_generation_model\"),\n reranker=self.reranker,\n )\n except Exception as e:\n msg = f\"Error creating collection: {e}\"\n raise ValueError(msg) from e\n\n provider = embedding_provider.lower() if embedding_provider and embedding_provider != \"Bring your own\" else None\n build_config[\"collection_name\"].update(\n {\n \"value\": field_value[\"01_new_collection_name\"],\n \"options\": build_config[\"collection_name\"][\"options\"] + [field_value[\"01_new_collection_name\"]],\n }\n )\n build_config[\"embedding_model\"][\"show\"] = not bool(provider)\n build_config[\"embedding_model\"][\"required\"] = not bool(provider)\n build_config[\"collection_name\"][\"options_metadata\"].append(\n {\n \"records\": 0,\n \"provider\": provider,\n \"icon\": self.get_provider_icon(provider_name=provider),\n \"model\": field_value.get(\"03_embedding_generation_model\"),\n }\n )\n\n # Make sure we always show the reranker options if the collection is hybrid enabled\n # And right now they always are\n build_config[\"lexical_terms\"][\"show\"] = True\n\n def _handle_database_selection(self, build_config: dict, field_value: str) -> dict:\n \"\"\"Handle database selection and update related configurations.\"\"\"\n build_config = self.reset_database_list(build_config)\n\n # Reset collection list if database selection changes\n if field_value not in build_config[\"database_name\"][\"options\"]:\n build_config[\"database_name\"][\"value\"] = \"\"\n return build_config\n\n # Get the api endpoint for the selected database\n index = build_config[\"database_name\"][\"options\"].index(field_value)\n build_config[\"api_endpoint\"][\"value\"] = build_config[\"database_name\"][\"options_metadata\"][index][\"api_endpoint\"]\n\n # Get the org_id for the selected database\n org_id = build_config[\"database_name\"][\"options_metadata\"][index][\"org_id\"]\n if not org_id:\n return build_config\n\n # Update the list of keyspaces based on the db info\n build_config[\"keyspace\"][\"options\"] = 
build_config[\"database_name\"][\"options_metadata\"][index][\"keyspaces\"]\n build_config[\"keyspace\"][\"value\"] = (\n build_config[\"keyspace\"][\"options\"] and build_config[\"keyspace\"][\"options\"][0]\n if build_config[\"keyspace\"][\"value\"] not in build_config[\"keyspace\"][\"options\"]\n else build_config[\"keyspace\"][\"value\"]\n )\n\n # Get the database id for the selected database\n db_id = self.get_database_id_static(api_endpoint=build_config[\"api_endpoint\"][\"value\"])\n keyspace = self.get_keyspace()\n\n # Update the helper text for the embedding provider field\n template = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n template[\"02_embedding_generation_provider\"][\"helper_text\"] = (\n \"To create collections with more embedding provider options, go to \"\n f''\n \"your database in Astra DB.\"\n )\n\n # Reset provider options\n build_config = self.reset_provider_options(build_config)\n\n # Handle hybrid search options\n build_config = self._handle_hybrid_search_options(build_config)\n\n return self.reset_collection_list(build_config)\n\n def _handle_collection_selection(self, build_config: dict, field_value: str) -> dict:\n \"\"\"Handle collection selection and update embedding options.\"\"\"\n build_config[\"autodetect_collection\"][\"value\"] = True\n build_config = self.reset_collection_list(build_config)\n\n # Reset embedding model if collection selection changes\n if field_value and field_value not in build_config[\"collection_name\"][\"options\"]:\n build_config[\"collection_name\"][\"options\"].append(field_value)\n build_config[\"collection_name\"][\"options_metadata\"].append(\n {\n \"records\": 0,\n \"provider\": None,\n \"icon\": \"vectorstores\",\n \"model\": None,\n }\n )\n build_config[\"autodetect_collection\"][\"value\"] = False\n\n if not field_value:\n return build_config\n\n # Get the selected collection index\n index = build_config[\"collection_name\"][\"options\"].index(field_value)\n\n # Set the provider of the selected collection\n provider = build_config[\"collection_name\"][\"options_metadata\"][index][\"provider\"]\n build_config[\"embedding_model\"][\"show\"] = not bool(provider)\n build_config[\"embedding_model\"][\"required\"] = not bool(provider)\n\n # Grab the collection object\n database = self.get_database_object(api_endpoint=build_config[\"api_endpoint\"][\"value\"])\n collection = database.get_collection(\n name=field_value,\n keyspace=build_config[\"keyspace\"][\"value\"],\n )\n\n # Check if hybrid and lexical are enabled\n col_options = collection.options()\n hyb_enabled = col_options.rerank and col_options.rerank.enabled\n lex_enabled = col_options.lexical and col_options.lexical.enabled\n user_hyb_enabled = build_config[\"search_method\"][\"value\"] == \"Hybrid Search\"\n\n # Show lexical terms if the collection is hybrid enabled\n build_config[\"lexical_terms\"][\"show\"] = hyb_enabled and lex_enabled and user_hyb_enabled\n\n return build_config\n\n @check_cached_vector_store\n def build_vector_store(self):\n try:\n from langchain_astradb import AstraDBVectorStore\n except ImportError as e:\n msg = (\n \"Could not import langchain Astra DB integration package. 
\"\n \"Please install it with `pip install langchain-astradb`.\"\n )\n raise ImportError(msg) from e\n\n # Get the embedding model and additional params\n embedding_params = {\"embedding\": self.embedding_model} if self.embedding_model else {}\n\n # Get the additional parameters\n additional_params = self.astradb_vectorstore_kwargs or {}\n\n # Get Langflow version and platform information\n __version__ = get_version_info()[\"version\"]\n langflow_prefix = \"\"\n # if os.getenv(\"AWS_EXECUTION_ENV\") == \"AWS_ECS_FARGATE\": # TODO: More precise way of detecting\n # langflow_prefix = \"ds-\"\n\n # Get the database object\n database = self.get_database_object()\n autodetect = self.collection_name in database.list_collection_names() and self.autodetect_collection\n\n # Bundle up the auto-detect parameters\n autodetect_params = {\n \"autodetect_collection\": autodetect,\n \"content_field\": (\n self.content_field\n if self.content_field and embedding_params\n else (\n \"page_content\"\n if embedding_params\n and self.collection_data(collection_name=self.collection_name, database=database) == 0\n else None\n )\n ),\n \"ignore_invalid_documents\": self.ignore_invalid_documents,\n }\n\n # Choose HybridSearchMode based on the selected param\n hybrid_search_mode = HybridSearchMode.DEFAULT if self.search_method == \"Hybrid Search\" else HybridSearchMode.OFF\n\n # Attempt to build the Vector Store object\n try:\n vector_store = AstraDBVectorStore(\n # Astra DB Authentication Parameters\n token=self.token,\n api_endpoint=database.api_endpoint,\n namespace=database.keyspace,\n collection_name=self.collection_name,\n environment=self.environment,\n # Hybrid Search Parameters\n hybrid_search=hybrid_search_mode,\n # Astra DB Usage Tracking Parameters\n ext_callers=[(f\"{langflow_prefix}langflow\", __version__)],\n # Astra DB Vector Store Parameters\n **autodetect_params,\n **embedding_params,\n **additional_params,\n )\n except Exception as e:\n msg = f\"Error initializing AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n\n # Add documents to the vector store\n self._add_documents_to_vector_store(vector_store)\n\n return vector_store\n\n def _add_documents_to_vector_store(self, vector_store) -> None:\n self.ingest_data = self._prepare_ingest_data()\n\n documents = []\n for _input in self.ingest_data or []:\n if isinstance(_input, Data):\n documents.append(_input.to_lc_document())\n else:\n msg = \"Vector Store Inputs must be Data objects.\"\n raise TypeError(msg)\n\n if documents and self.deletion_field:\n self.log(f\"Deleting documents where {self.deletion_field}\")\n try:\n database = self.get_database_object()\n collection = database.get_collection(self.collection_name, keyspace=database.keyspace)\n delete_values = list({doc.metadata[self.deletion_field] for doc in documents})\n self.log(f\"Deleting documents where {self.deletion_field} matches {delete_values}.\")\n collection.delete_many({f\"metadata.{self.deletion_field}\": {\"$in\": delete_values}})\n except Exception as e:\n msg = f\"Error deleting documents from AstraDBVectorStore based on '{self.deletion_field}': {e}\"\n raise ValueError(msg) from e\n\n if documents:\n self.log(f\"Adding {len(documents)} documents to the Vector Store.\")\n try:\n vector_store.add_documents(documents)\n except Exception as e:\n msg = f\"Error adding documents to AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n else:\n self.log(\"No documents to add to the Vector Store.\")\n\n def _map_search_type(self) -> str:\n search_type_mapping = {\n 
\"Similarity with score threshold\": \"similarity_score_threshold\",\n \"MMR (Max Marginal Relevance)\": \"mmr\",\n }\n\n return search_type_mapping.get(self.search_type, \"similarity\")\n\n def _build_search_args(self):\n # Clean up the search query\n query = self.search_query if isinstance(self.search_query, str) and self.search_query.strip() else None\n lexical_terms = self.lexical_terms or None\n\n # Check if we have a search query, and if so set the args\n if query:\n args = {\n \"query\": query,\n \"search_type\": self._map_search_type(),\n \"k\": self.number_of_results,\n \"score_threshold\": self.search_score_threshold,\n \"lexical_query\": lexical_terms,\n }\n elif self.advanced_search_filter:\n args = {\n \"n\": self.number_of_results,\n }\n else:\n return {}\n\n filter_arg = self.advanced_search_filter or {}\n if filter_arg:\n args[\"filter\"] = filter_arg\n\n return args\n\n def search_documents(self, vector_store=None) -> list[Data]:\n vector_store = vector_store or self.build_vector_store()\n\n self.log(f\"Search input: {self.search_query}\")\n self.log(f\"Search type: {self.search_type}\")\n self.log(f\"Number of results: {self.number_of_results}\")\n self.log(f\"store.hybrid_search: {vector_store.hybrid_search}\")\n self.log(f\"Lexical terms: {self.lexical_terms}\")\n self.log(f\"Reranker: {self.reranker}\")\n\n try:\n search_args = self._build_search_args()\n except Exception as e:\n msg = f\"Error in AstraDBVectorStore._build_search_args: {e}\"\n raise ValueError(msg) from e\n\n if not search_args:\n self.log(\"No search input or filters provided. Skipping search.\")\n return []\n\n docs = []\n search_method = \"search\" if \"query\" in search_args else \"metadata_search\"\n\n try:\n self.log(f\"Calling vector_store.{search_method} with args: {search_args}\")\n docs = getattr(vector_store, search_method)(**search_args)\n except Exception as e:\n msg = f\"Error performing {search_method} in AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n\n self.log(f\"Retrieved documents: {len(docs)}\")\n\n data = docs_to_data(docs)\n self.log(f\"Converted documents to data: {len(data)}\")\n self.status = data\n\n return data\n\n def get_retriever_kwargs(self):\n search_args = self._build_search_args()\n\n return {\n \"search_type\": self._map_search_type(),\n \"search_kwargs\": search_args,\n }\n"
},
"collection_name": {
"_input_type": "DropdownInput",
@@ -2094,7 +2094,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.custom import Component\nfrom langflow.helpers.data import safe_convert\nfrom langflow.io import (\n BoolInput,\n HandleInput,\n MessageTextInput,\n MultilineInput,\n Output,\n TabInput,\n)\nfrom langflow.schema import Data, DataFrame\nfrom langflow.schema.message import Message\n\n\nclass ParserComponent(Component):\n display_name = \"Parser\"\n description = \"Extracts text using a template.\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"input_data\",\n display_name=\"Data or DataFrame\",\n input_types=[\"DataFrame\", \"Data\"],\n info=\"Accepts either a DataFrame or a Data object.\",\n required=True,\n ),\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Parser\", \"Stringify\"],\n value=\"Parser\",\n info=\"Convert into raw string instead of using a template.\",\n real_time_refresh=True,\n ),\n MultilineInput(\n name=\"pattern\",\n display_name=\"Template\",\n info=(\n \"Use variables within curly brackets to extract column values for DataFrames \"\n \"or key values for Data.\"\n \"For example: `Name: {Name}, Age: {Age}, Country: {Country}`\"\n ),\n value=\"Text: {text}\", # Example default\n dynamic=True,\n show=True,\n required=True,\n ),\n MessageTextInput(\n name=\"sep\",\n display_name=\"Separator\",\n advanced=True,\n value=\"\\n\",\n info=\"String used to separate rows/items.\",\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"Parsed Text\",\n name=\"parsed_text\",\n info=\"Formatted text output.\",\n method=\"parse_combined_text\",\n ),\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n \"\"\"Dynamically hide/show `template` and enforce requirement based on `stringify`.\"\"\"\n if field_name == \"mode\":\n build_config[\"pattern\"][\"show\"] = self.mode == \"Parser\"\n build_config[\"pattern\"][\"required\"] = self.mode == \"Parser\"\n if field_value:\n clean_data = BoolInput(\n name=\"clean_data\",\n display_name=\"Clean Data\",\n info=(\n \"Enable to clean the data by removing empty rows and lines \"\n \"in each cell of the DataFrame/ Data object.\"\n ),\n value=True,\n advanced=True,\n required=False,\n )\n build_config[\"clean_data\"] = clean_data.to_dict()\n else:\n build_config.pop(\"clean_data\", None)\n\n return build_config\n\n def _clean_args(self):\n \"\"\"Prepare arguments based on input type.\"\"\"\n input_data = self.input_data\n\n match input_data:\n case list() if all(isinstance(item, Data) for item in input_data):\n msg = \"List of Data objects is not supported.\"\n raise ValueError(msg)\n case DataFrame():\n return input_data, None\n case Data():\n return None, input_data\n case dict() if \"data\" in input_data:\n try:\n if \"columns\" in input_data: # Likely a DataFrame\n return DataFrame.from_dict(input_data), None\n # Likely a Data object\n return None, Data(**input_data)\n except (TypeError, ValueError, KeyError) as e:\n msg = f\"Invalid structured input provided: {e!s}\"\n raise ValueError(msg) from e\n case _:\n msg = f\"Unsupported input type: {type(input_data)}. 
Expected DataFrame or Data.\"\n raise ValueError(msg)\n\n def parse_combined_text(self) -> Message:\n \"\"\"Parse all rows/items into a single text or convert input to string if `stringify` is enabled.\"\"\"\n # Early return for stringify option\n if self.mode == \"Stringify\":\n return self.convert_to_string()\n\n df, data = self._clean_args()\n\n lines = []\n if df is not None:\n for _, row in df.iterrows():\n formatted_text = self.pattern.format(**row.to_dict())\n lines.append(formatted_text)\n elif data is not None:\n formatted_text = self.pattern.format(**data.data)\n lines.append(formatted_text)\n\n combined_text = self.sep.join(lines)\n self.status = combined_text\n return Message(text=combined_text)\n\n def convert_to_string(self) -> Message:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n result = \"\"\n if isinstance(self.input_data, list):\n result = \"\\n\".join([safe_convert(item, clean_data=self.clean_data or False) for item in self.input_data])\n else:\n result = safe_convert(self.input_data or False)\n self.log(f\"Converted to string with length: {len(result)}\")\n\n message = Message(text=result)\n self.status = message\n return message\n"
+ "value": "from langflow.custom.custom_component.component import Component\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, HandleInput, MessageTextInput, MultilineInput, TabInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.template.field.base import Output\n\n\nclass ParserComponent(Component):\n display_name = \"Parser\"\n description = \"Extracts text using a template.\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"input_data\",\n display_name=\"Data or DataFrame\",\n input_types=[\"DataFrame\", \"Data\"],\n info=\"Accepts either a DataFrame or a Data object.\",\n required=True,\n ),\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Parser\", \"Stringify\"],\n value=\"Parser\",\n info=\"Convert into raw string instead of using a template.\",\n real_time_refresh=True,\n ),\n MultilineInput(\n name=\"pattern\",\n display_name=\"Template\",\n info=(\n \"Use variables within curly brackets to extract column values for DataFrames \"\n \"or key values for Data.\"\n \"For example: `Name: {Name}, Age: {Age}, Country: {Country}`\"\n ),\n value=\"Text: {text}\", # Example default\n dynamic=True,\n show=True,\n required=True,\n ),\n MessageTextInput(\n name=\"sep\",\n display_name=\"Separator\",\n advanced=True,\n value=\"\\n\",\n info=\"String used to separate rows/items.\",\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"Parsed Text\",\n name=\"parsed_text\",\n info=\"Formatted text output.\",\n method=\"parse_combined_text\",\n ),\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n \"\"\"Dynamically hide/show `template` and enforce requirement based on `stringify`.\"\"\"\n if field_name == \"mode\":\n build_config[\"pattern\"][\"show\"] = self.mode == \"Parser\"\n build_config[\"pattern\"][\"required\"] = self.mode == \"Parser\"\n if field_value:\n clean_data = BoolInput(\n name=\"clean_data\",\n display_name=\"Clean Data\",\n info=(\n \"Enable to clean the data by removing empty rows and lines \"\n \"in each cell of the DataFrame/ Data object.\"\n ),\n value=True,\n advanced=True,\n required=False,\n )\n build_config[\"clean_data\"] = clean_data.to_dict()\n else:\n build_config.pop(\"clean_data\", None)\n\n return build_config\n\n def _clean_args(self):\n \"\"\"Prepare arguments based on input type.\"\"\"\n input_data = self.input_data\n\n match input_data:\n case list() if all(isinstance(item, Data) for item in input_data):\n msg = \"List of Data objects is not supported.\"\n raise ValueError(msg)\n case DataFrame():\n return input_data, None\n case Data():\n return None, input_data\n case dict() if \"data\" in input_data:\n try:\n if \"columns\" in input_data: # Likely a DataFrame\n return DataFrame.from_dict(input_data), None\n # Likely a Data object\n return None, Data(**input_data)\n except (TypeError, ValueError, KeyError) as e:\n msg = f\"Invalid structured input provided: {e!s}\"\n raise ValueError(msg) from e\n case _:\n msg = f\"Unsupported input type: {type(input_data)}. 
Expected DataFrame or Data.\"\n raise ValueError(msg)\n\n def parse_combined_text(self) -> Message:\n \"\"\"Parse all rows/items into a single text or convert input to string if `stringify` is enabled.\"\"\"\n # Early return for stringify option\n if self.mode == \"Stringify\":\n return self.convert_to_string()\n\n df, data = self._clean_args()\n\n lines = []\n if df is not None:\n for _, row in df.iterrows():\n formatted_text = self.pattern.format(**row.to_dict())\n lines.append(formatted_text)\n elif data is not None:\n formatted_text = self.pattern.format(**data.data)\n lines.append(formatted_text)\n\n combined_text = self.sep.join(lines)\n self.status = combined_text\n return Message(text=combined_text)\n\n def convert_to_string(self) -> Message:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n result = \"\"\n if isinstance(self.input_data, list):\n result = \"\\n\".join([safe_convert(item, clean_data=self.clean_data or False) for item in self.input_data])\n else:\n result = safe_convert(self.input_data or False)\n self.log(f\"Converted to string with length: {len(result)}\")\n\n message = Message(text=result)\n self.status = message\n return message\n"
},
"input_data": {
"_input_type": "HandleInput",
@@ -2337,7 +2337,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
@@ -2575,7 +2575,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.custom import Component\nfrom langflow.helpers.data import safe_convert\nfrom langflow.io import (\n BoolInput,\n HandleInput,\n MessageTextInput,\n MultilineInput,\n Output,\n TabInput,\n)\nfrom langflow.schema import Data, DataFrame\nfrom langflow.schema.message import Message\n\n\nclass ParserComponent(Component):\n display_name = \"Parser\"\n description = \"Extracts text using a template.\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"input_data\",\n display_name=\"Data or DataFrame\",\n input_types=[\"DataFrame\", \"Data\"],\n info=\"Accepts either a DataFrame or a Data object.\",\n required=True,\n ),\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Parser\", \"Stringify\"],\n value=\"Parser\",\n info=\"Convert into raw string instead of using a template.\",\n real_time_refresh=True,\n ),\n MultilineInput(\n name=\"pattern\",\n display_name=\"Template\",\n info=(\n \"Use variables within curly brackets to extract column values for DataFrames \"\n \"or key values for Data.\"\n \"For example: `Name: {Name}, Age: {Age}, Country: {Country}`\"\n ),\n value=\"Text: {text}\", # Example default\n dynamic=True,\n show=True,\n required=True,\n ),\n MessageTextInput(\n name=\"sep\",\n display_name=\"Separator\",\n advanced=True,\n value=\"\\n\",\n info=\"String used to separate rows/items.\",\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"Parsed Text\",\n name=\"parsed_text\",\n info=\"Formatted text output.\",\n method=\"parse_combined_text\",\n ),\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n \"\"\"Dynamically hide/show `template` and enforce requirement based on `stringify`.\"\"\"\n if field_name == \"mode\":\n build_config[\"pattern\"][\"show\"] = self.mode == \"Parser\"\n build_config[\"pattern\"][\"required\"] = self.mode == \"Parser\"\n if field_value:\n clean_data = BoolInput(\n name=\"clean_data\",\n display_name=\"Clean Data\",\n info=(\n \"Enable to clean the data by removing empty rows and lines \"\n \"in each cell of the DataFrame/ Data object.\"\n ),\n value=True,\n advanced=True,\n required=False,\n )\n build_config[\"clean_data\"] = clean_data.to_dict()\n else:\n build_config.pop(\"clean_data\", None)\n\n return build_config\n\n def _clean_args(self):\n \"\"\"Prepare arguments based on input type.\"\"\"\n input_data = self.input_data\n\n match input_data:\n case list() if all(isinstance(item, Data) for item in input_data):\n msg = \"List of Data objects is not supported.\"\n raise ValueError(msg)\n case DataFrame():\n return input_data, None\n case Data():\n return None, input_data\n case dict() if \"data\" in input_data:\n try:\n if \"columns\" in input_data: # Likely a DataFrame\n return DataFrame.from_dict(input_data), None\n # Likely a Data object\n return None, Data(**input_data)\n except (TypeError, ValueError, KeyError) as e:\n msg = f\"Invalid structured input provided: {e!s}\"\n raise ValueError(msg) from e\n case _:\n msg = f\"Unsupported input type: {type(input_data)}. 
Expected DataFrame or Data.\"\n raise ValueError(msg)\n\n def parse_combined_text(self) -> Message:\n \"\"\"Parse all rows/items into a single text or convert input to string if `stringify` is enabled.\"\"\"\n # Early return for stringify option\n if self.mode == \"Stringify\":\n return self.convert_to_string()\n\n df, data = self._clean_args()\n\n lines = []\n if df is not None:\n for _, row in df.iterrows():\n formatted_text = self.pattern.format(**row.to_dict())\n lines.append(formatted_text)\n elif data is not None:\n formatted_text = self.pattern.format(**data.data)\n lines.append(formatted_text)\n\n combined_text = self.sep.join(lines)\n self.status = combined_text\n return Message(text=combined_text)\n\n def convert_to_string(self) -> Message:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n result = \"\"\n if isinstance(self.input_data, list):\n result = \"\\n\".join([safe_convert(item, clean_data=self.clean_data or False) for item in self.input_data])\n else:\n result = safe_convert(self.input_data or False)\n self.log(f\"Converted to string with length: {len(result)}\")\n\n message = Message(text=result)\n self.status = message\n return message\n"
+ "value": "from langflow.custom.custom_component.component import Component\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, HandleInput, MessageTextInput, MultilineInput, TabInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.template.field.base import Output\n\n\nclass ParserComponent(Component):\n display_name = \"Parser\"\n description = \"Extracts text using a template.\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"input_data\",\n display_name=\"Data or DataFrame\",\n input_types=[\"DataFrame\", \"Data\"],\n info=\"Accepts either a DataFrame or a Data object.\",\n required=True,\n ),\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Parser\", \"Stringify\"],\n value=\"Parser\",\n info=\"Convert into raw string instead of using a template.\",\n real_time_refresh=True,\n ),\n MultilineInput(\n name=\"pattern\",\n display_name=\"Template\",\n info=(\n \"Use variables within curly brackets to extract column values for DataFrames \"\n \"or key values for Data.\"\n \"For example: `Name: {Name}, Age: {Age}, Country: {Country}`\"\n ),\n value=\"Text: {text}\", # Example default\n dynamic=True,\n show=True,\n required=True,\n ),\n MessageTextInput(\n name=\"sep\",\n display_name=\"Separator\",\n advanced=True,\n value=\"\\n\",\n info=\"String used to separate rows/items.\",\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"Parsed Text\",\n name=\"parsed_text\",\n info=\"Formatted text output.\",\n method=\"parse_combined_text\",\n ),\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n \"\"\"Dynamically hide/show `template` and enforce requirement based on `stringify`.\"\"\"\n if field_name == \"mode\":\n build_config[\"pattern\"][\"show\"] = self.mode == \"Parser\"\n build_config[\"pattern\"][\"required\"] = self.mode == \"Parser\"\n if field_value:\n clean_data = BoolInput(\n name=\"clean_data\",\n display_name=\"Clean Data\",\n info=(\n \"Enable to clean the data by removing empty rows and lines \"\n \"in each cell of the DataFrame/ Data object.\"\n ),\n value=True,\n advanced=True,\n required=False,\n )\n build_config[\"clean_data\"] = clean_data.to_dict()\n else:\n build_config.pop(\"clean_data\", None)\n\n return build_config\n\n def _clean_args(self):\n \"\"\"Prepare arguments based on input type.\"\"\"\n input_data = self.input_data\n\n match input_data:\n case list() if all(isinstance(item, Data) for item in input_data):\n msg = \"List of Data objects is not supported.\"\n raise ValueError(msg)\n case DataFrame():\n return input_data, None\n case Data():\n return None, input_data\n case dict() if \"data\" in input_data:\n try:\n if \"columns\" in input_data: # Likely a DataFrame\n return DataFrame.from_dict(input_data), None\n # Likely a Data object\n return None, Data(**input_data)\n except (TypeError, ValueError, KeyError) as e:\n msg = f\"Invalid structured input provided: {e!s}\"\n raise ValueError(msg) from e\n case _:\n msg = f\"Unsupported input type: {type(input_data)}. 
Expected DataFrame or Data.\"\n raise ValueError(msg)\n\n def parse_combined_text(self) -> Message:\n \"\"\"Parse all rows/items into a single text or convert input to string if `stringify` is enabled.\"\"\"\n # Early return for stringify option\n if self.mode == \"Stringify\":\n return self.convert_to_string()\n\n df, data = self._clean_args()\n\n lines = []\n if df is not None:\n for _, row in df.iterrows():\n formatted_text = self.pattern.format(**row.to_dict())\n lines.append(formatted_text)\n elif data is not None:\n formatted_text = self.pattern.format(**data.data)\n lines.append(formatted_text)\n\n combined_text = self.sep.join(lines)\n self.status = combined_text\n return Message(text=combined_text)\n\n def convert_to_string(self) -> Message:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n result = \"\"\n if isinstance(self.input_data, list):\n result = \"\\n\".join([safe_convert(item, clean_data=self.clean_data or False) for item in self.input_data])\n else:\n result = safe_convert(self.input_data or False)\n self.log(f\"Converted to string with length: {len(result)}\")\n\n message = Message(text=result)\n self.status = message\n return message\n"
},
"input_data": {
"_input_type": "HandleInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Image Sentiment Analysis.json b/src/backend/base/langflow/initial_setup/starter_projects/Image Sentiment Analysis.json
index 2151cd767..613bf600e 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Image Sentiment Analysis.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Image Sentiment Analysis.json
@@ -286,7 +286,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
+ "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"_input_type": "FileInput",
@@ -609,7 +609,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
@@ -880,7 +880,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"template": {
"_input_type": "PromptInput",
@@ -1061,7 +1061,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
+ "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
},
"input_value": {
"_input_type": "MessageInput",
@@ -1422,7 +1422,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from pydantic import BaseModel, Field, create_model\nfrom trustcall import create_extractor\n\nfrom langflow.base.models.chat_result import get_chat_result\nfrom langflow.custom import Component\nfrom langflow.helpers.base_model import build_model_from_schema\nfrom langflow.io import (\n BoolInput,\n HandleInput,\n MessageTextInput,\n MultilineInput,\n Output,\n TableInput,\n)\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.table import EditMode\n\n\nclass StructuredOutputComponent(Component):\n display_name = \"Structured Output\"\n description = (\n \"Transforms LLM responses into **structured data formats**. Ideal for extracting specific information \"\n \"or creating consistent outputs.\"\n )\n name = \"StructuredOutput\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"llm\",\n display_name=\"Language Model\",\n info=\"The language model to use to generate the structured output.\",\n input_types=[\"LanguageModel\"],\n required=True,\n ),\n MessageTextInput(\n name=\"input_value\",\n display_name=\"Input Message\",\n info=\"The input message to the language model.\",\n tool_mode=True,\n required=True,\n ),\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Format Instructions\",\n info=\"The instructions to the language model for formatting the output.\",\n value=(\n \"You are an AI system designed to extract structured information from unstructured text.\"\n \"Given the input_text, return a JSON object with predefined keys based on the expected structure.\"\n \"Extract values accurately and format them according to the specified type \"\n \"(e.g., string, integer, float, date).\"\n \"If a value is missing or cannot be determined, return a default \"\n \"(e.g., null, 0, or 'N/A').\"\n \"If multiple instances of the expected structure exist within the input_text, \"\n \"stream each as a separate JSON object.\"\n ),\n required=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"schema_name\",\n display_name=\"Schema Name\",\n info=\"Provide a name for the output data schema.\",\n advanced=True,\n ),\n TableInput(\n name=\"output_schema\",\n display_name=\"Output Schema\",\n info=\"Define the structure and data types for the model's output.\",\n required=True,\n # TODO: remove deault value\n table_schema=[\n {\n \"name\": \"name\",\n \"display_name\": \"Name\",\n \"type\": \"str\",\n \"description\": \"Specify the name of the output field.\",\n \"default\": \"field\",\n \"edit_mode\": EditMode.INLINE,\n },\n {\n \"name\": \"description\",\n \"display_name\": \"Description\",\n \"type\": \"str\",\n \"description\": \"Describe the purpose of the output field.\",\n \"default\": \"description of field\",\n \"edit_mode\": EditMode.POPOVER,\n },\n {\n \"name\": \"type\",\n \"display_name\": \"Type\",\n \"type\": \"str\",\n \"edit_mode\": EditMode.INLINE,\n \"description\": (\"Indicate the data type of the output field (e.g., str, int, float, bool, dict).\"),\n \"options\": [\"str\", \"int\", \"float\", \"bool\", \"dict\"],\n \"default\": \"str\",\n },\n ],\n value=[\n {\n \"name\": \"field\",\n \"description\": \"description of field\",\n \"type\": \"str\",\n \"multiple\": \"False\",\n }\n ],\n ),\n BoolInput(\n name=\"multiple\",\n advanced=True,\n display_name=\"Generate Multiple\",\n info=\"[Deprecated] Always set to True\",\n value=True,\n ),\n ]\n\n outputs = [\n Output(\n name=\"structured_output\",\n display_name=\"Structured Output\",\n method=\"build_structured_output\",\n ),\n Output(\n 
name=\"structured_output_dataframe\",\n display_name=\"DataFrame\",\n method=\"as_dataframe\",\n ),\n ]\n\n def build_structured_output_base(self) -> Data:\n schema_name = self.schema_name or \"OutputModel\"\n\n if not hasattr(self.llm, \"with_structured_output\"):\n msg = \"Language model does not support structured output.\"\n raise TypeError(msg)\n if not self.output_schema:\n msg = \"Output schema cannot be empty\"\n raise ValueError(msg)\n\n output_model_ = build_model_from_schema(self.output_schema)\n\n output_model = create_model(\n schema_name,\n __doc__=f\"A list of {schema_name}.\",\n objects=(list[output_model_], Field(description=f\"A list of {schema_name}.\")), # type: ignore[valid-type]\n )\n\n try:\n llm_with_structured_output = create_extractor(self.llm, tools=[output_model])\n except NotImplementedError as exc:\n msg = f\"{self.llm.__class__.__name__} does not support structured output.\"\n raise TypeError(msg) from exc\n config_dict = {\n \"run_name\": self.display_name,\n \"project_name\": self.get_project_name(),\n \"callbacks\": self.get_langchain_callbacks(),\n }\n result = get_chat_result(\n runnable=llm_with_structured_output,\n system_message=self.system_prompt,\n input_value=self.input_value,\n config=config_dict,\n )\n if isinstance(result, BaseModel):\n result = result.model_dump()\n if responses := result.get(\"responses\"):\n result = responses[0].model_dump()\n if result and \"objects\" in result:\n return result[\"objects\"]\n\n return result\n\n def build_structured_output(self) -> Data:\n output = self.build_structured_output_base()\n\n return Data(text_key=\"results\", data={\"results\": output})\n\n def as_dataframe(self) -> DataFrame:\n output = self.build_structured_output_base()\n if isinstance(output, list):\n return DataFrame(data=output)\n return DataFrame(data=[output])\n"
+ "value": "from pydantic import BaseModel, Field, create_model\nfrom trustcall import create_extractor\n\nfrom langflow.base.models.chat_result import get_chat_result\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.helpers.base_model import build_model_from_schema\nfrom langflow.io import (\n BoolInput,\n HandleInput,\n MessageTextInput,\n MultilineInput,\n Output,\n TableInput,\n)\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.table import EditMode\n\n\nclass StructuredOutputComponent(Component):\n display_name = \"Structured Output\"\n description = (\n \"Transforms LLM responses into **structured data formats**. Ideal for extracting specific information \"\n \"or creating consistent outputs.\"\n )\n name = \"StructuredOutput\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"llm\",\n display_name=\"Language Model\",\n info=\"The language model to use to generate the structured output.\",\n input_types=[\"LanguageModel\"],\n required=True,\n ),\n MessageTextInput(\n name=\"input_value\",\n display_name=\"Input Message\",\n info=\"The input message to the language model.\",\n tool_mode=True,\n required=True,\n ),\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Format Instructions\",\n info=\"The instructions to the language model for formatting the output.\",\n value=(\n \"You are an AI system designed to extract structured information from unstructured text.\"\n \"Given the input_text, return a JSON object with predefined keys based on the expected structure.\"\n \"Extract values accurately and format them according to the specified type \"\n \"(e.g., string, integer, float, date).\"\n \"If a value is missing or cannot be determined, return a default \"\n \"(e.g., null, 0, or 'N/A').\"\n \"If multiple instances of the expected structure exist within the input_text, \"\n \"stream each as a separate JSON object.\"\n ),\n required=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"schema_name\",\n display_name=\"Schema Name\",\n info=\"Provide a name for the output data schema.\",\n advanced=True,\n ),\n TableInput(\n name=\"output_schema\",\n display_name=\"Output Schema\",\n info=\"Define the structure and data types for the model's output.\",\n required=True,\n # TODO: remove deault value\n table_schema=[\n {\n \"name\": \"name\",\n \"display_name\": \"Name\",\n \"type\": \"str\",\n \"description\": \"Specify the name of the output field.\",\n \"default\": \"field\",\n \"edit_mode\": EditMode.INLINE,\n },\n {\n \"name\": \"description\",\n \"display_name\": \"Description\",\n \"type\": \"str\",\n \"description\": \"Describe the purpose of the output field.\",\n \"default\": \"description of field\",\n \"edit_mode\": EditMode.POPOVER,\n },\n {\n \"name\": \"type\",\n \"display_name\": \"Type\",\n \"type\": \"str\",\n \"edit_mode\": EditMode.INLINE,\n \"description\": (\"Indicate the data type of the output field (e.g., str, int, float, bool, dict).\"),\n \"options\": [\"str\", \"int\", \"float\", \"bool\", \"dict\"],\n \"default\": \"str\",\n },\n ],\n value=[\n {\n \"name\": \"field\",\n \"description\": \"description of field\",\n \"type\": \"str\",\n \"multiple\": \"False\",\n }\n ],\n ),\n BoolInput(\n name=\"multiple\",\n advanced=True,\n display_name=\"Generate Multiple\",\n info=\"[Deprecated] Always set to True\",\n value=True,\n ),\n ]\n\n outputs = [\n Output(\n name=\"structured_output\",\n display_name=\"Structured Output\",\n 
method=\"build_structured_output\",\n ),\n Output(\n name=\"structured_output_dataframe\",\n display_name=\"DataFrame\",\n method=\"as_dataframe\",\n ),\n ]\n\n def build_structured_output_base(self) -> Data:\n schema_name = self.schema_name or \"OutputModel\"\n\n if not hasattr(self.llm, \"with_structured_output\"):\n msg = \"Language model does not support structured output.\"\n raise TypeError(msg)\n if not self.output_schema:\n msg = \"Output schema cannot be empty\"\n raise ValueError(msg)\n\n output_model_ = build_model_from_schema(self.output_schema)\n\n output_model = create_model(\n schema_name,\n __doc__=f\"A list of {schema_name}.\",\n objects=(list[output_model_], Field(description=f\"A list of {schema_name}.\")), # type: ignore[valid-type]\n )\n\n try:\n llm_with_structured_output = create_extractor(self.llm, tools=[output_model])\n except NotImplementedError as exc:\n msg = f\"{self.llm.__class__.__name__} does not support structured output.\"\n raise TypeError(msg) from exc\n config_dict = {\n \"run_name\": self.display_name,\n \"project_name\": self.get_project_name(),\n \"callbacks\": self.get_langchain_callbacks(),\n }\n result = get_chat_result(\n runnable=llm_with_structured_output,\n system_message=self.system_prompt,\n input_value=self.input_value,\n config=config_dict,\n )\n if isinstance(result, BaseModel):\n result = result.model_dump()\n if responses := result.get(\"responses\"):\n result = responses[0].model_dump()\n if result and \"objects\" in result:\n return result[\"objects\"]\n\n return result\n\n def build_structured_output(self) -> Data:\n output = self.build_structured_output_base()\n\n return Data(text_key=\"results\", data={\"results\": output})\n\n def as_dataframe(self) -> DataFrame:\n output = self.build_structured_output_base()\n if isinstance(output, list):\n return DataFrame(data=output)\n return DataFrame(data=[output])\n"
},
"input_value": {
"_input_type": "MessageTextInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Instagram Copywriter.json b/src/backend/base/langflow/initial_setup/starter_projects/Instagram Copywriter.json
index 9d50f7fda..8065ef2f6 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Instagram Copywriter.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Instagram Copywriter.json
@@ -398,7 +398,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
+ "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"_input_type": "FileInput",
@@ -650,7 +650,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"context": {
"advanced": false,
@@ -931,7 +931,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"post": {
"advanced": false,
@@ -1149,7 +1149,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
@@ -1493,7 +1493,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
+ "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers.current_date import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
},
"handle_parsing_errors": {
"_input_type": "BoolInput",
@@ -2054,7 +2054,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"image_description": {
"advanced": false,
@@ -2435,7 +2435,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "import httpx\nfrom loguru import logger\n\nfrom langflow.custom import Component\nfrom langflow.io import BoolInput, DropdownInput, IntInput, MessageTextInput, Output, SecretStrInput\nfrom langflow.schema import Data\nfrom langflow.schema.dataframe import DataFrame\n\n\nclass TavilySearchComponent(Component):\n display_name = \"Tavily Search API\"\n description = \"\"\"**Tavily Search** is a search engine optimized for LLMs and RAG, \\\n aimed at efficient, quick, and persistent search results.\"\"\"\n icon = \"TavilyIcon\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Tavily API Key\",\n required=True,\n info=\"Your Tavily API Key.\",\n ),\n MessageTextInput(\n name=\"query\",\n display_name=\"Search Query\",\n info=\"The search query you want to execute with Tavily.\",\n tool_mode=True,\n ),\n DropdownInput(\n name=\"search_depth\",\n display_name=\"Search Depth\",\n info=\"The depth of the search.\",\n options=[\"basic\", \"advanced\"],\n value=\"advanced\",\n advanced=True,\n ),\n IntInput(\n name=\"chunks_per_source\",\n display_name=\"Chunks Per Source\",\n info=(\"The number of content chunks to retrieve from each source (1-3). Only works with advanced search.\"),\n value=3,\n advanced=True,\n ),\n DropdownInput(\n name=\"topic\",\n display_name=\"Search Topic\",\n info=\"The category of the search.\",\n options=[\"general\", \"news\"],\n value=\"general\",\n advanced=True,\n ),\n IntInput(\n name=\"days\",\n display_name=\"Days\",\n info=\"Number of days back from current date to include. Only available with news topic.\",\n value=7,\n advanced=True,\n ),\n IntInput(\n name=\"max_results\",\n display_name=\"Max Results\",\n info=\"The maximum number of search results to return.\",\n value=5,\n advanced=True,\n ),\n BoolInput(\n name=\"include_answer\",\n display_name=\"Include Answer\",\n info=\"Include a short answer to original query.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"time_range\",\n display_name=\"Time Range\",\n info=\"The time range back from the current date to filter results.\",\n options=[\"day\", \"week\", \"month\", \"year\"],\n value=None, # Default to None to make it optional\n advanced=True,\n ),\n BoolInput(\n name=\"include_images\",\n display_name=\"Include Images\",\n info=\"Include a list of query-related images in the response.\",\n value=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"include_domains\",\n display_name=\"Include Domains\",\n info=\"Comma-separated list of domains to include in the search results.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"exclude_domains\",\n display_name=\"Exclude Domains\",\n info=\"Comma-separated list of domains to exclude from the search results.\",\n advanced=True,\n ),\n BoolInput(\n name=\"include_raw_content\",\n display_name=\"Include Raw Content\",\n info=\"Include the cleaned and parsed HTML content of each search result.\",\n value=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"fetch_content_dataframe\"),\n ]\n\n def fetch_content(self) -> list[Data]:\n try:\n # Only process domains if they're provided\n include_domains = None\n exclude_domains = None\n\n if self.include_domains:\n include_domains = [domain.strip() for domain in self.include_domains.split(\",\") if domain.strip()]\n\n if self.exclude_domains:\n exclude_domains = [domain.strip() for domain in self.exclude_domains.split(\",\") if domain.strip()]\n\n url = \"https://api.tavily.com/search\"\n headers = 
{\n \"content-type\": \"application/json\",\n \"accept\": \"application/json\",\n }\n\n payload = {\n \"api_key\": self.api_key,\n \"query\": self.query,\n \"search_depth\": self.search_depth,\n \"topic\": self.topic,\n \"max_results\": self.max_results,\n \"include_images\": self.include_images,\n \"include_answer\": self.include_answer,\n \"include_raw_content\": self.include_raw_content,\n \"days\": self.days,\n \"time_range\": self.time_range,\n }\n\n # Only add domains to payload if they exist and have values\n if include_domains:\n payload[\"include_domains\"] = include_domains\n if exclude_domains:\n payload[\"exclude_domains\"] = exclude_domains\n\n # Add conditional parameters only if they should be included\n if self.search_depth == \"advanced\" and self.chunks_per_source:\n payload[\"chunks_per_source\"] = self.chunks_per_source\n\n if self.topic == \"news\" and self.days:\n payload[\"days\"] = int(self.days) # Ensure days is an integer\n\n # Add time_range if it's set\n if hasattr(self, \"time_range\") and self.time_range:\n payload[\"time_range\"] = self.time_range\n\n # Add timeout handling\n with httpx.Client(timeout=90.0) as client:\n response = client.post(url, json=payload, headers=headers)\n\n response.raise_for_status()\n search_results = response.json()\n\n data_results = []\n\n if self.include_answer and search_results.get(\"answer\"):\n data_results.append(Data(text=search_results[\"answer\"]))\n\n for result in search_results.get(\"results\", []):\n content = result.get(\"content\", \"\")\n result_data = {\n \"title\": result.get(\"title\"),\n \"url\": result.get(\"url\"),\n \"content\": content,\n \"score\": result.get(\"score\"),\n }\n if self.include_raw_content:\n result_data[\"raw_content\"] = result.get(\"raw_content\")\n\n data_results.append(Data(text=content, data=result_data))\n\n if self.include_images and search_results.get(\"images\"):\n data_results.append(Data(text=\"Images found\", data={\"images\": search_results[\"images\"]}))\n\n except httpx.TimeoutException:\n error_message = \"Request timed out (90s). Please try again or adjust parameters.\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except httpx.HTTPStatusError as exc:\n error_message = f\"HTTP error occurred: {exc.response.status_code} - {exc.response.text}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except httpx.RequestError as exc:\n error_message = f\"Request error occurred: {exc}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except ValueError as exc:\n error_message = f\"Invalid response format: {exc}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n else:\n self.status = data_results\n return data_results\n\n def fetch_content_dataframe(self) -> DataFrame:\n data = self.fetch_content()\n return DataFrame(data)\n"
+ "value": "import httpx\nfrom loguru import logger\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.template.field.base import Output\n\n\nclass TavilySearchComponent(Component):\n display_name = \"Tavily Search API\"\n description = \"\"\"**Tavily Search** is a search engine optimized for LLMs and RAG, \\\n aimed at efficient, quick, and persistent search results.\"\"\"\n icon = \"TavilyIcon\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Tavily API Key\",\n required=True,\n info=\"Your Tavily API Key.\",\n ),\n MessageTextInput(\n name=\"query\",\n display_name=\"Search Query\",\n info=\"The search query you want to execute with Tavily.\",\n tool_mode=True,\n ),\n DropdownInput(\n name=\"search_depth\",\n display_name=\"Search Depth\",\n info=\"The depth of the search.\",\n options=[\"basic\", \"advanced\"],\n value=\"advanced\",\n advanced=True,\n ),\n IntInput(\n name=\"chunks_per_source\",\n display_name=\"Chunks Per Source\",\n info=(\"The number of content chunks to retrieve from each source (1-3). Only works with advanced search.\"),\n value=3,\n advanced=True,\n ),\n DropdownInput(\n name=\"topic\",\n display_name=\"Search Topic\",\n info=\"The category of the search.\",\n options=[\"general\", \"news\"],\n value=\"general\",\n advanced=True,\n ),\n IntInput(\n name=\"days\",\n display_name=\"Days\",\n info=\"Number of days back from current date to include. Only available with news topic.\",\n value=7,\n advanced=True,\n ),\n IntInput(\n name=\"max_results\",\n display_name=\"Max Results\",\n info=\"The maximum number of search results to return.\",\n value=5,\n advanced=True,\n ),\n BoolInput(\n name=\"include_answer\",\n display_name=\"Include Answer\",\n info=\"Include a short answer to original query.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"time_range\",\n display_name=\"Time Range\",\n info=\"The time range back from the current date to filter results.\",\n options=[\"day\", \"week\", \"month\", \"year\"],\n value=None, # Default to None to make it optional\n advanced=True,\n ),\n BoolInput(\n name=\"include_images\",\n display_name=\"Include Images\",\n info=\"Include a list of query-related images in the response.\",\n value=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"include_domains\",\n display_name=\"Include Domains\",\n info=\"Comma-separated list of domains to include in the search results.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"exclude_domains\",\n display_name=\"Exclude Domains\",\n info=\"Comma-separated list of domains to exclude from the search results.\",\n advanced=True,\n ),\n BoolInput(\n name=\"include_raw_content\",\n display_name=\"Include Raw Content\",\n info=\"Include the cleaned and parsed HTML content of each search result.\",\n value=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"fetch_content_dataframe\"),\n ]\n\n def fetch_content(self) -> list[Data]:\n try:\n # Only process domains if they're provided\n include_domains = None\n exclude_domains = None\n\n if self.include_domains:\n include_domains = [domain.strip() for domain in self.include_domains.split(\",\") if domain.strip()]\n\n if self.exclude_domains:\n exclude_domains = [domain.strip() for domain in 
self.exclude_domains.split(\",\") if domain.strip()]\n\n url = \"https://api.tavily.com/search\"\n headers = {\n \"content-type\": \"application/json\",\n \"accept\": \"application/json\",\n }\n\n payload = {\n \"api_key\": self.api_key,\n \"query\": self.query,\n \"search_depth\": self.search_depth,\n \"topic\": self.topic,\n \"max_results\": self.max_results,\n \"include_images\": self.include_images,\n \"include_answer\": self.include_answer,\n \"include_raw_content\": self.include_raw_content,\n \"days\": self.days,\n \"time_range\": self.time_range,\n }\n\n # Only add domains to payload if they exist and have values\n if include_domains:\n payload[\"include_domains\"] = include_domains\n if exclude_domains:\n payload[\"exclude_domains\"] = exclude_domains\n\n # Add conditional parameters only if they should be included\n if self.search_depth == \"advanced\" and self.chunks_per_source:\n payload[\"chunks_per_source\"] = self.chunks_per_source\n\n if self.topic == \"news\" and self.days:\n payload[\"days\"] = int(self.days) # Ensure days is an integer\n\n # Add time_range if it's set\n if hasattr(self, \"time_range\") and self.time_range:\n payload[\"time_range\"] = self.time_range\n\n # Add timeout handling\n with httpx.Client(timeout=90.0) as client:\n response = client.post(url, json=payload, headers=headers)\n\n response.raise_for_status()\n search_results = response.json()\n\n data_results = []\n\n if self.include_answer and search_results.get(\"answer\"):\n data_results.append(Data(text=search_results[\"answer\"]))\n\n for result in search_results.get(\"results\", []):\n content = result.get(\"content\", \"\")\n result_data = {\n \"title\": result.get(\"title\"),\n \"url\": result.get(\"url\"),\n \"content\": content,\n \"score\": result.get(\"score\"),\n }\n if self.include_raw_content:\n result_data[\"raw_content\"] = result.get(\"raw_content\")\n\n data_results.append(Data(text=content, data=result_data))\n\n if self.include_images and search_results.get(\"images\"):\n data_results.append(Data(text=\"Images found\", data={\"images\": search_results[\"images\"]}))\n\n except httpx.TimeoutException:\n error_message = \"Request timed out (90s). Please try again or adjust parameters.\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except httpx.HTTPStatusError as exc:\n error_message = f\"HTTP error occurred: {exc.response.status_code} - {exc.response.text}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except httpx.RequestError as exc:\n error_message = f\"Request error occurred: {exc}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except ValueError as exc:\n error_message = f\"Invalid response format: {exc}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n else:\n self.status = data_results\n return data_results\n\n def fetch_content_dataframe(self) -> DataFrame:\n data = self.fetch_content()\n return DataFrame(data)\n"
},
"days": {
"_input_type": "IntInput",
@@ -2910,7 +2910,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
+ "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
},
"input_value": {
"_input_type": "MessageInput",
@@ -3303,7 +3303,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
+ "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
},
"input_value": {
"_input_type": "MessageInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Invoice Summarizer.json b/src/backend/base/langflow/initial_setup/starter_projects/Invoice Summarizer.json
index d96e8a788..c2ab63ed6 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Invoice Summarizer.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Invoice Summarizer.json
@@ -213,7 +213,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"template": {
"_input_type": "PromptInput",
@@ -406,7 +406,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
@@ -953,7 +953,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
+ "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"_input_type": "FileInput",
@@ -1356,7 +1356,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
+ "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers.current_date import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
},
"handle_parsing_errors": {
"_input_type": "BoolInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Market Research.json b/src/backend/base/langflow/initial_setup/starter_projects/Market Research.json
index f841ad5ef..bc5ff9042 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Market Research.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Market Research.json
@@ -280,7 +280,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
+ "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"_input_type": "FileInput",
@@ -597,7 +597,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
@@ -889,7 +889,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from pydantic import BaseModel, Field, create_model\nfrom trustcall import create_extractor\n\nfrom langflow.base.models.chat_result import get_chat_result\nfrom langflow.custom import Component\nfrom langflow.helpers.base_model import build_model_from_schema\nfrom langflow.io import (\n BoolInput,\n HandleInput,\n MessageTextInput,\n MultilineInput,\n Output,\n TableInput,\n)\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.table import EditMode\n\n\nclass StructuredOutputComponent(Component):\n display_name = \"Structured Output\"\n description = (\n \"Transforms LLM responses into **structured data formats**. Ideal for extracting specific information \"\n \"or creating consistent outputs.\"\n )\n name = \"StructuredOutput\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"llm\",\n display_name=\"Language Model\",\n info=\"The language model to use to generate the structured output.\",\n input_types=[\"LanguageModel\"],\n required=True,\n ),\n MessageTextInput(\n name=\"input_value\",\n display_name=\"Input Message\",\n info=\"The input message to the language model.\",\n tool_mode=True,\n required=True,\n ),\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Format Instructions\",\n info=\"The instructions to the language model for formatting the output.\",\n value=(\n \"You are an AI system designed to extract structured information from unstructured text.\"\n \"Given the input_text, return a JSON object with predefined keys based on the expected structure.\"\n \"Extract values accurately and format them according to the specified type \"\n \"(e.g., string, integer, float, date).\"\n \"If a value is missing or cannot be determined, return a default \"\n \"(e.g., null, 0, or 'N/A').\"\n \"If multiple instances of the expected structure exist within the input_text, \"\n \"stream each as a separate JSON object.\"\n ),\n required=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"schema_name\",\n display_name=\"Schema Name\",\n info=\"Provide a name for the output data schema.\",\n advanced=True,\n ),\n TableInput(\n name=\"output_schema\",\n display_name=\"Output Schema\",\n info=\"Define the structure and data types for the model's output.\",\n required=True,\n # TODO: remove deault value\n table_schema=[\n {\n \"name\": \"name\",\n \"display_name\": \"Name\",\n \"type\": \"str\",\n \"description\": \"Specify the name of the output field.\",\n \"default\": \"field\",\n \"edit_mode\": EditMode.INLINE,\n },\n {\n \"name\": \"description\",\n \"display_name\": \"Description\",\n \"type\": \"str\",\n \"description\": \"Describe the purpose of the output field.\",\n \"default\": \"description of field\",\n \"edit_mode\": EditMode.POPOVER,\n },\n {\n \"name\": \"type\",\n \"display_name\": \"Type\",\n \"type\": \"str\",\n \"edit_mode\": EditMode.INLINE,\n \"description\": (\"Indicate the data type of the output field (e.g., str, int, float, bool, dict).\"),\n \"options\": [\"str\", \"int\", \"float\", \"bool\", \"dict\"],\n \"default\": \"str\",\n },\n ],\n value=[\n {\n \"name\": \"field\",\n \"description\": \"description of field\",\n \"type\": \"str\",\n \"multiple\": \"False\",\n }\n ],\n ),\n BoolInput(\n name=\"multiple\",\n advanced=True,\n display_name=\"Generate Multiple\",\n info=\"[Deprecated] Always set to True\",\n value=True,\n ),\n ]\n\n outputs = [\n Output(\n name=\"structured_output\",\n display_name=\"Structured Output\",\n method=\"build_structured_output\",\n ),\n Output(\n 
name=\"structured_output_dataframe\",\n display_name=\"DataFrame\",\n method=\"as_dataframe\",\n ),\n ]\n\n def build_structured_output_base(self) -> Data:\n schema_name = self.schema_name or \"OutputModel\"\n\n if not hasattr(self.llm, \"with_structured_output\"):\n msg = \"Language model does not support structured output.\"\n raise TypeError(msg)\n if not self.output_schema:\n msg = \"Output schema cannot be empty\"\n raise ValueError(msg)\n\n output_model_ = build_model_from_schema(self.output_schema)\n\n output_model = create_model(\n schema_name,\n __doc__=f\"A list of {schema_name}.\",\n objects=(list[output_model_], Field(description=f\"A list of {schema_name}.\")), # type: ignore[valid-type]\n )\n\n try:\n llm_with_structured_output = create_extractor(self.llm, tools=[output_model])\n except NotImplementedError as exc:\n msg = f\"{self.llm.__class__.__name__} does not support structured output.\"\n raise TypeError(msg) from exc\n config_dict = {\n \"run_name\": self.display_name,\n \"project_name\": self.get_project_name(),\n \"callbacks\": self.get_langchain_callbacks(),\n }\n result = get_chat_result(\n runnable=llm_with_structured_output,\n system_message=self.system_prompt,\n input_value=self.input_value,\n config=config_dict,\n )\n if isinstance(result, BaseModel):\n result = result.model_dump()\n if responses := result.get(\"responses\"):\n result = responses[0].model_dump()\n if result and \"objects\" in result:\n return result[\"objects\"]\n\n return result\n\n def build_structured_output(self) -> Data:\n output = self.build_structured_output_base()\n\n return Data(text_key=\"results\", data={\"results\": output})\n\n def as_dataframe(self) -> DataFrame:\n output = self.build_structured_output_base()\n if isinstance(output, list):\n return DataFrame(data=output)\n return DataFrame(data=[output])\n"
+ "value": "from pydantic import BaseModel, Field, create_model\nfrom trustcall import create_extractor\n\nfrom langflow.base.models.chat_result import get_chat_result\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.helpers.base_model import build_model_from_schema\nfrom langflow.io import (\n BoolInput,\n HandleInput,\n MessageTextInput,\n MultilineInput,\n Output,\n TableInput,\n)\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.table import EditMode\n\n\nclass StructuredOutputComponent(Component):\n display_name = \"Structured Output\"\n description = (\n \"Transforms LLM responses into **structured data formats**. Ideal for extracting specific information \"\n \"or creating consistent outputs.\"\n )\n name = \"StructuredOutput\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"llm\",\n display_name=\"Language Model\",\n info=\"The language model to use to generate the structured output.\",\n input_types=[\"LanguageModel\"],\n required=True,\n ),\n MessageTextInput(\n name=\"input_value\",\n display_name=\"Input Message\",\n info=\"The input message to the language model.\",\n tool_mode=True,\n required=True,\n ),\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Format Instructions\",\n info=\"The instructions to the language model for formatting the output.\",\n value=(\n \"You are an AI system designed to extract structured information from unstructured text.\"\n \"Given the input_text, return a JSON object with predefined keys based on the expected structure.\"\n \"Extract values accurately and format them according to the specified type \"\n \"(e.g., string, integer, float, date).\"\n \"If a value is missing or cannot be determined, return a default \"\n \"(e.g., null, 0, or 'N/A').\"\n \"If multiple instances of the expected structure exist within the input_text, \"\n \"stream each as a separate JSON object.\"\n ),\n required=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"schema_name\",\n display_name=\"Schema Name\",\n info=\"Provide a name for the output data schema.\",\n advanced=True,\n ),\n TableInput(\n name=\"output_schema\",\n display_name=\"Output Schema\",\n info=\"Define the structure and data types for the model's output.\",\n required=True,\n # TODO: remove deault value\n table_schema=[\n {\n \"name\": \"name\",\n \"display_name\": \"Name\",\n \"type\": \"str\",\n \"description\": \"Specify the name of the output field.\",\n \"default\": \"field\",\n \"edit_mode\": EditMode.INLINE,\n },\n {\n \"name\": \"description\",\n \"display_name\": \"Description\",\n \"type\": \"str\",\n \"description\": \"Describe the purpose of the output field.\",\n \"default\": \"description of field\",\n \"edit_mode\": EditMode.POPOVER,\n },\n {\n \"name\": \"type\",\n \"display_name\": \"Type\",\n \"type\": \"str\",\n \"edit_mode\": EditMode.INLINE,\n \"description\": (\"Indicate the data type of the output field (e.g., str, int, float, bool, dict).\"),\n \"options\": [\"str\", \"int\", \"float\", \"bool\", \"dict\"],\n \"default\": \"str\",\n },\n ],\n value=[\n {\n \"name\": \"field\",\n \"description\": \"description of field\",\n \"type\": \"str\",\n \"multiple\": \"False\",\n }\n ],\n ),\n BoolInput(\n name=\"multiple\",\n advanced=True,\n display_name=\"Generate Multiple\",\n info=\"[Deprecated] Always set to True\",\n value=True,\n ),\n ]\n\n outputs = [\n Output(\n name=\"structured_output\",\n display_name=\"Structured Output\",\n 
method=\"build_structured_output\",\n ),\n Output(\n name=\"structured_output_dataframe\",\n display_name=\"DataFrame\",\n method=\"as_dataframe\",\n ),\n ]\n\n def build_structured_output_base(self) -> Data:\n schema_name = self.schema_name or \"OutputModel\"\n\n if not hasattr(self.llm, \"with_structured_output\"):\n msg = \"Language model does not support structured output.\"\n raise TypeError(msg)\n if not self.output_schema:\n msg = \"Output schema cannot be empty\"\n raise ValueError(msg)\n\n output_model_ = build_model_from_schema(self.output_schema)\n\n output_model = create_model(\n schema_name,\n __doc__=f\"A list of {schema_name}.\",\n objects=(list[output_model_], Field(description=f\"A list of {schema_name}.\")), # type: ignore[valid-type]\n )\n\n try:\n llm_with_structured_output = create_extractor(self.llm, tools=[output_model])\n except NotImplementedError as exc:\n msg = f\"{self.llm.__class__.__name__} does not support structured output.\"\n raise TypeError(msg) from exc\n config_dict = {\n \"run_name\": self.display_name,\n \"project_name\": self.get_project_name(),\n \"callbacks\": self.get_langchain_callbacks(),\n }\n result = get_chat_result(\n runnable=llm_with_structured_output,\n system_message=self.system_prompt,\n input_value=self.input_value,\n config=config_dict,\n )\n if isinstance(result, BaseModel):\n result = result.model_dump()\n if responses := result.get(\"responses\"):\n result = responses[0].model_dump()\n if result and \"objects\" in result:\n return result[\"objects\"]\n\n return result\n\n def build_structured_output(self) -> Data:\n output = self.build_structured_output_base()\n\n return Data(text_key=\"results\", data={\"results\": output})\n\n def as_dataframe(self) -> DataFrame:\n output = self.build_structured_output_base()\n if isinstance(output, list):\n return DataFrame(data=output)\n return DataFrame(data=[output])\n"
},
"input_value": {
"_input_type": "MessageTextInput",
@@ -1343,7 +1343,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
+ "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers.current_date import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
},
"handle_parsing_errors": {
"_input_type": "BoolInput",
@@ -1941,7 +1941,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "import httpx\nfrom loguru import logger\n\nfrom langflow.custom import Component\nfrom langflow.io import BoolInput, DropdownInput, IntInput, MessageTextInput, Output, SecretStrInput\nfrom langflow.schema import Data\nfrom langflow.schema.dataframe import DataFrame\n\n\nclass TavilySearchComponent(Component):\n display_name = \"Tavily Search API\"\n description = \"\"\"**Tavily Search** is a search engine optimized for LLMs and RAG, \\\n aimed at efficient, quick, and persistent search results.\"\"\"\n icon = \"TavilyIcon\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Tavily API Key\",\n required=True,\n info=\"Your Tavily API Key.\",\n ),\n MessageTextInput(\n name=\"query\",\n display_name=\"Search Query\",\n info=\"The search query you want to execute with Tavily.\",\n tool_mode=True,\n ),\n DropdownInput(\n name=\"search_depth\",\n display_name=\"Search Depth\",\n info=\"The depth of the search.\",\n options=[\"basic\", \"advanced\"],\n value=\"advanced\",\n advanced=True,\n ),\n IntInput(\n name=\"chunks_per_source\",\n display_name=\"Chunks Per Source\",\n info=(\"The number of content chunks to retrieve from each source (1-3). Only works with advanced search.\"),\n value=3,\n advanced=True,\n ),\n DropdownInput(\n name=\"topic\",\n display_name=\"Search Topic\",\n info=\"The category of the search.\",\n options=[\"general\", \"news\"],\n value=\"general\",\n advanced=True,\n ),\n IntInput(\n name=\"days\",\n display_name=\"Days\",\n info=\"Number of days back from current date to include. Only available with news topic.\",\n value=7,\n advanced=True,\n ),\n IntInput(\n name=\"max_results\",\n display_name=\"Max Results\",\n info=\"The maximum number of search results to return.\",\n value=5,\n advanced=True,\n ),\n BoolInput(\n name=\"include_answer\",\n display_name=\"Include Answer\",\n info=\"Include a short answer to original query.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"time_range\",\n display_name=\"Time Range\",\n info=\"The time range back from the current date to filter results.\",\n options=[\"day\", \"week\", \"month\", \"year\"],\n value=None, # Default to None to make it optional\n advanced=True,\n ),\n BoolInput(\n name=\"include_images\",\n display_name=\"Include Images\",\n info=\"Include a list of query-related images in the response.\",\n value=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"include_domains\",\n display_name=\"Include Domains\",\n info=\"Comma-separated list of domains to include in the search results.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"exclude_domains\",\n display_name=\"Exclude Domains\",\n info=\"Comma-separated list of domains to exclude from the search results.\",\n advanced=True,\n ),\n BoolInput(\n name=\"include_raw_content\",\n display_name=\"Include Raw Content\",\n info=\"Include the cleaned and parsed HTML content of each search result.\",\n value=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"fetch_content_dataframe\"),\n ]\n\n def fetch_content(self) -> list[Data]:\n try:\n # Only process domains if they're provided\n include_domains = None\n exclude_domains = None\n\n if self.include_domains:\n include_domains = [domain.strip() for domain in self.include_domains.split(\",\") if domain.strip()]\n\n if self.exclude_domains:\n exclude_domains = [domain.strip() for domain in self.exclude_domains.split(\",\") if domain.strip()]\n\n url = \"https://api.tavily.com/search\"\n headers = 
{\n \"content-type\": \"application/json\",\n \"accept\": \"application/json\",\n }\n\n payload = {\n \"api_key\": self.api_key,\n \"query\": self.query,\n \"search_depth\": self.search_depth,\n \"topic\": self.topic,\n \"max_results\": self.max_results,\n \"include_images\": self.include_images,\n \"include_answer\": self.include_answer,\n \"include_raw_content\": self.include_raw_content,\n \"days\": self.days,\n \"time_range\": self.time_range,\n }\n\n # Only add domains to payload if they exist and have values\n if include_domains:\n payload[\"include_domains\"] = include_domains\n if exclude_domains:\n payload[\"exclude_domains\"] = exclude_domains\n\n # Add conditional parameters only if they should be included\n if self.search_depth == \"advanced\" and self.chunks_per_source:\n payload[\"chunks_per_source\"] = self.chunks_per_source\n\n if self.topic == \"news\" and self.days:\n payload[\"days\"] = int(self.days) # Ensure days is an integer\n\n # Add time_range if it's set\n if hasattr(self, \"time_range\") and self.time_range:\n payload[\"time_range\"] = self.time_range\n\n # Add timeout handling\n with httpx.Client(timeout=90.0) as client:\n response = client.post(url, json=payload, headers=headers)\n\n response.raise_for_status()\n search_results = response.json()\n\n data_results = []\n\n if self.include_answer and search_results.get(\"answer\"):\n data_results.append(Data(text=search_results[\"answer\"]))\n\n for result in search_results.get(\"results\", []):\n content = result.get(\"content\", \"\")\n result_data = {\n \"title\": result.get(\"title\"),\n \"url\": result.get(\"url\"),\n \"content\": content,\n \"score\": result.get(\"score\"),\n }\n if self.include_raw_content:\n result_data[\"raw_content\"] = result.get(\"raw_content\")\n\n data_results.append(Data(text=content, data=result_data))\n\n if self.include_images and search_results.get(\"images\"):\n data_results.append(Data(text=\"Images found\", data={\"images\": search_results[\"images\"]}))\n\n except httpx.TimeoutException:\n error_message = \"Request timed out (90s). Please try again or adjust parameters.\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except httpx.HTTPStatusError as exc:\n error_message = f\"HTTP error occurred: {exc.response.status_code} - {exc.response.text}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except httpx.RequestError as exc:\n error_message = f\"Request error occurred: {exc}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except ValueError as exc:\n error_message = f\"Invalid response format: {exc}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n else:\n self.status = data_results\n return data_results\n\n def fetch_content_dataframe(self) -> DataFrame:\n data = self.fetch_content()\n return DataFrame(data)\n"
+ "value": "import httpx\nfrom loguru import logger\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.template.field.base import Output\n\n\nclass TavilySearchComponent(Component):\n display_name = \"Tavily Search API\"\n description = \"\"\"**Tavily Search** is a search engine optimized for LLMs and RAG, \\\n aimed at efficient, quick, and persistent search results.\"\"\"\n icon = \"TavilyIcon\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Tavily API Key\",\n required=True,\n info=\"Your Tavily API Key.\",\n ),\n MessageTextInput(\n name=\"query\",\n display_name=\"Search Query\",\n info=\"The search query you want to execute with Tavily.\",\n tool_mode=True,\n ),\n DropdownInput(\n name=\"search_depth\",\n display_name=\"Search Depth\",\n info=\"The depth of the search.\",\n options=[\"basic\", \"advanced\"],\n value=\"advanced\",\n advanced=True,\n ),\n IntInput(\n name=\"chunks_per_source\",\n display_name=\"Chunks Per Source\",\n info=(\"The number of content chunks to retrieve from each source (1-3). Only works with advanced search.\"),\n value=3,\n advanced=True,\n ),\n DropdownInput(\n name=\"topic\",\n display_name=\"Search Topic\",\n info=\"The category of the search.\",\n options=[\"general\", \"news\"],\n value=\"general\",\n advanced=True,\n ),\n IntInput(\n name=\"days\",\n display_name=\"Days\",\n info=\"Number of days back from current date to include. Only available with news topic.\",\n value=7,\n advanced=True,\n ),\n IntInput(\n name=\"max_results\",\n display_name=\"Max Results\",\n info=\"The maximum number of search results to return.\",\n value=5,\n advanced=True,\n ),\n BoolInput(\n name=\"include_answer\",\n display_name=\"Include Answer\",\n info=\"Include a short answer to original query.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"time_range\",\n display_name=\"Time Range\",\n info=\"The time range back from the current date to filter results.\",\n options=[\"day\", \"week\", \"month\", \"year\"],\n value=None, # Default to None to make it optional\n advanced=True,\n ),\n BoolInput(\n name=\"include_images\",\n display_name=\"Include Images\",\n info=\"Include a list of query-related images in the response.\",\n value=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"include_domains\",\n display_name=\"Include Domains\",\n info=\"Comma-separated list of domains to include in the search results.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"exclude_domains\",\n display_name=\"Exclude Domains\",\n info=\"Comma-separated list of domains to exclude from the search results.\",\n advanced=True,\n ),\n BoolInput(\n name=\"include_raw_content\",\n display_name=\"Include Raw Content\",\n info=\"Include the cleaned and parsed HTML content of each search result.\",\n value=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"fetch_content_dataframe\"),\n ]\n\n def fetch_content(self) -> list[Data]:\n try:\n # Only process domains if they're provided\n include_domains = None\n exclude_domains = None\n\n if self.include_domains:\n include_domains = [domain.strip() for domain in self.include_domains.split(\",\") if domain.strip()]\n\n if self.exclude_domains:\n exclude_domains = [domain.strip() for domain in 
self.exclude_domains.split(\",\") if domain.strip()]\n\n url = \"https://api.tavily.com/search\"\n headers = {\n \"content-type\": \"application/json\",\n \"accept\": \"application/json\",\n }\n\n payload = {\n \"api_key\": self.api_key,\n \"query\": self.query,\n \"search_depth\": self.search_depth,\n \"topic\": self.topic,\n \"max_results\": self.max_results,\n \"include_images\": self.include_images,\n \"include_answer\": self.include_answer,\n \"include_raw_content\": self.include_raw_content,\n \"days\": self.days,\n \"time_range\": self.time_range,\n }\n\n # Only add domains to payload if they exist and have values\n if include_domains:\n payload[\"include_domains\"] = include_domains\n if exclude_domains:\n payload[\"exclude_domains\"] = exclude_domains\n\n # Add conditional parameters only if they should be included\n if self.search_depth == \"advanced\" and self.chunks_per_source:\n payload[\"chunks_per_source\"] = self.chunks_per_source\n\n if self.topic == \"news\" and self.days:\n payload[\"days\"] = int(self.days) # Ensure days is an integer\n\n # Add time_range if it's set\n if hasattr(self, \"time_range\") and self.time_range:\n payload[\"time_range\"] = self.time_range\n\n # Add timeout handling\n with httpx.Client(timeout=90.0) as client:\n response = client.post(url, json=payload, headers=headers)\n\n response.raise_for_status()\n search_results = response.json()\n\n data_results = []\n\n if self.include_answer and search_results.get(\"answer\"):\n data_results.append(Data(text=search_results[\"answer\"]))\n\n for result in search_results.get(\"results\", []):\n content = result.get(\"content\", \"\")\n result_data = {\n \"title\": result.get(\"title\"),\n \"url\": result.get(\"url\"),\n \"content\": content,\n \"score\": result.get(\"score\"),\n }\n if self.include_raw_content:\n result_data[\"raw_content\"] = result.get(\"raw_content\")\n\n data_results.append(Data(text=content, data=result_data))\n\n if self.include_images and search_results.get(\"images\"):\n data_results.append(Data(text=\"Images found\", data={\"images\": search_results[\"images\"]}))\n\n except httpx.TimeoutException:\n error_message = \"Request timed out (90s). Please try again or adjust parameters.\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except httpx.HTTPStatusError as exc:\n error_message = f\"HTTP error occurred: {exc.response.status_code} - {exc.response.text}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except httpx.RequestError as exc:\n error_message = f\"Request error occurred: {exc}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except ValueError as exc:\n error_message = f\"Invalid response format: {exc}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n else:\n self.status = data_results\n return data_results\n\n def fetch_content_dataframe(self) -> DataFrame:\n data = self.fetch_content()\n return DataFrame(data)\n"
},
"days": {
"_input_type": "IntInput",
@@ -2417,7 +2417,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
+ "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
},
"input_value": {
"_input_type": "MessageInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Meeting Summary.json b/src/backend/base/langflow/initial_setup/starter_projects/Meeting Summary.json
index 0b90f4010..8689d9fa0 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Meeting Summary.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Meeting Summary.json
@@ -395,7 +395,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "import assemblyai as aai\nfrom loguru import logger\n\nfrom langflow.custom import Component\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.io import DataInput, FloatInput, Output, SecretStrInput\nfrom langflow.schema import Data\n\n\nclass AssemblyAITranscriptionJobPoller(Component):\n display_name = \"AssemblyAI Poll Transcript\"\n description = \"Poll for the status of a transcription job using AssemblyAI\"\n documentation = \"https://www.assemblyai.com/docs\"\n icon = \"AssemblyAI\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Assembly API Key\",\n info=\"Your AssemblyAI API key. You can get one from https://www.assemblyai.com/\",\n required=True,\n ),\n DataInput(\n name=\"transcript_id\",\n display_name=\"Transcript ID\",\n info=\"The ID of the transcription job to poll\",\n required=True,\n ),\n FloatInput(\n name=\"polling_interval\",\n display_name=\"Polling Interval\",\n value=3.0,\n info=\"The polling interval in seconds\",\n advanced=True,\n range_spec=RangeSpec(min=3, max=30),\n ),\n ]\n\n outputs = [\n Output(display_name=\"Transcription Result\", name=\"transcription_result\", method=\"poll_transcription_job\"),\n ]\n\n def poll_transcription_job(self) -> Data:\n \"\"\"Polls the transcription status until completion and returns the Data.\"\"\"\n aai.settings.api_key = self.api_key\n aai.settings.polling_interval = self.polling_interval\n\n # check if it's an error message from the previous step\n if self.transcript_id.data.get(\"error\"):\n self.status = self.transcript_id.data[\"error\"]\n return self.transcript_id\n\n try:\n transcript = aai.Transcript.get_by_id(self.transcript_id.data[\"transcript_id\"])\n except Exception as e: # noqa: BLE001\n error = f\"Getting transcription failed: {e}\"\n logger.opt(exception=True).debug(error)\n self.status = error\n return Data(data={\"error\": error})\n\n if transcript.status == aai.TranscriptStatus.completed:\n json_response = transcript.json_response\n text = json_response.pop(\"text\", None)\n utterances = json_response.pop(\"utterances\", None)\n transcript_id = json_response.pop(\"id\", None)\n sorted_data = {\"text\": text, \"utterances\": utterances, \"id\": transcript_id}\n sorted_data.update(json_response)\n data = Data(data=sorted_data)\n self.status = data\n return data\n self.status = transcript.error\n return Data(data={\"error\": transcript.error})\n"
+ "value": "import assemblyai as aai\nfrom loguru import logger\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.io import DataInput, FloatInput, Output, SecretStrInput\nfrom langflow.schema.data import Data\n\n\nclass AssemblyAITranscriptionJobPoller(Component):\n display_name = \"AssemblyAI Poll Transcript\"\n description = \"Poll for the status of a transcription job using AssemblyAI\"\n documentation = \"https://www.assemblyai.com/docs\"\n icon = \"AssemblyAI\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Assembly API Key\",\n info=\"Your AssemblyAI API key. You can get one from https://www.assemblyai.com/\",\n required=True,\n ),\n DataInput(\n name=\"transcript_id\",\n display_name=\"Transcript ID\",\n info=\"The ID of the transcription job to poll\",\n required=True,\n ),\n FloatInput(\n name=\"polling_interval\",\n display_name=\"Polling Interval\",\n value=3.0,\n info=\"The polling interval in seconds\",\n advanced=True,\n range_spec=RangeSpec(min=3, max=30),\n ),\n ]\n\n outputs = [\n Output(display_name=\"Transcription Result\", name=\"transcription_result\", method=\"poll_transcription_job\"),\n ]\n\n def poll_transcription_job(self) -> Data:\n \"\"\"Polls the transcription status until completion and returns the Data.\"\"\"\n aai.settings.api_key = self.api_key\n aai.settings.polling_interval = self.polling_interval\n\n # check if it's an error message from the previous step\n if self.transcript_id.data.get(\"error\"):\n self.status = self.transcript_id.data[\"error\"]\n return self.transcript_id\n\n try:\n transcript = aai.Transcript.get_by_id(self.transcript_id.data[\"transcript_id\"])\n except Exception as e: # noqa: BLE001\n error = f\"Getting transcription failed: {e}\"\n logger.opt(exception=True).debug(error)\n self.status = error\n return Data(data={\"error\": error})\n\n if transcript.status == aai.TranscriptStatus.completed:\n json_response = transcript.json_response\n text = json_response.pop(\"text\", None)\n utterances = json_response.pop(\"utterances\", None)\n transcript_id = json_response.pop(\"id\", None)\n sorted_data = {\"text\": text, \"utterances\": utterances, \"id\": transcript_id}\n sorted_data.update(json_response)\n data = Data(data=sorted_data)\n self.status = data\n return data\n self.status = transcript.error\n return Data(data={\"error\": transcript.error})\n"
},
"polling_interval": {
"_input_type": "FloatInput",
@@ -539,7 +539,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.custom import Component\nfrom langflow.helpers.data import data_to_text, data_to_text_list\nfrom langflow.io import DataInput, MultilineInput, Output, StrInput\nfrom langflow.schema import Data\nfrom langflow.schema.message import Message\n\n\nclass ParseDataComponent(Component):\n display_name = \"Data to Message\"\n description = \"Convert Data objects into Messages using any {field_name} from input data.\"\n icon = \"message-square\"\n name = \"ParseData\"\n legacy = True\n metadata = {\n \"legacy_name\": \"Parse Data\",\n }\n\n inputs = [\n DataInput(\n name=\"data\",\n display_name=\"Data\",\n info=\"The data to convert to text.\",\n is_list=True,\n required=True,\n ),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. \"\n \"It can contain the keys {text}, {data} or any other key in the Data.\",\n value=\"{text}\",\n required=True,\n ),\n StrInput(name=\"sep\", display_name=\"Separator\", advanced=True, value=\"\\n\"),\n ]\n\n outputs = [\n Output(\n display_name=\"Message\",\n name=\"text\",\n info=\"Data as a single Message, with each input Data separated by Separator\",\n method=\"parse_data\",\n ),\n Output(\n display_name=\"Data List\",\n name=\"data_list\",\n info=\"Data as a list of new Data, each having `text` formatted by Template\",\n method=\"parse_data_as_list\",\n ),\n ]\n\n def _clean_args(self) -> tuple[list[Data], str, str]:\n data = self.data if isinstance(self.data, list) else [self.data]\n template = self.template\n sep = self.sep\n return data, template, sep\n\n def parse_data(self) -> Message:\n data, template, sep = self._clean_args()\n result_string = data_to_text(template, data, sep)\n self.status = result_string\n return Message(text=result_string)\n\n def parse_data_as_list(self) -> list[Data]:\n data, template, _ = self._clean_args()\n text_list, data_list = data_to_text_list(template, data)\n for item, text in zip(data_list, text_list, strict=True):\n item.set_text(text)\n self.status = data_list\n return data_list\n"
+ "value": "from langflow.custom.custom_component.component import Component\nfrom langflow.helpers.data import data_to_text, data_to_text_list\nfrom langflow.io import DataInput, MultilineInput, Output, StrInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.message import Message\n\n\nclass ParseDataComponent(Component):\n display_name = \"Data to Message\"\n description = \"Convert Data objects into Messages using any {field_name} from input data.\"\n icon = \"message-square\"\n name = \"ParseData\"\n legacy = True\n metadata = {\n \"legacy_name\": \"Parse Data\",\n }\n\n inputs = [\n DataInput(\n name=\"data\",\n display_name=\"Data\",\n info=\"The data to convert to text.\",\n is_list=True,\n required=True,\n ),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. \"\n \"It can contain the keys {text}, {data} or any other key in the Data.\",\n value=\"{text}\",\n required=True,\n ),\n StrInput(name=\"sep\", display_name=\"Separator\", advanced=True, value=\"\\n\"),\n ]\n\n outputs = [\n Output(\n display_name=\"Message\",\n name=\"text\",\n info=\"Data as a single Message, with each input Data separated by Separator\",\n method=\"parse_data\",\n ),\n Output(\n display_name=\"Data List\",\n name=\"data_list\",\n info=\"Data as a list of new Data, each having `text` formatted by Template\",\n method=\"parse_data_as_list\",\n ),\n ]\n\n def _clean_args(self) -> tuple[list[Data], str, str]:\n data = self.data if isinstance(self.data, list) else [self.data]\n template = self.template\n sep = self.sep\n return data, template, sep\n\n def parse_data(self) -> Message:\n data, template, sep = self._clean_args()\n result_string = data_to_text(template, data, sep)\n self.status = result_string\n return Message(text=result_string)\n\n def parse_data_as_list(self) -> list[Data]:\n data, template, _ = self._clean_args()\n text_list, data_list = data_to_text_list(template, data)\n for item, text in zip(data_list, text_list, strict=True):\n item.set_text(text)\n self.status = data_list\n return data_list\n"
},
"data": {
"_input_type": "DataInput",
@@ -738,7 +738,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
+ "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
},
"input_value": {
"_input_type": "MessageInput",
@@ -1086,7 +1086,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"template": {
"_input_type": "PromptInput",
@@ -1300,7 +1300,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
@@ -1602,7 +1602,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
@@ -1886,7 +1886,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
+ "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
},
"input_value": {
"_input_type": "MessageInput",
@@ -2296,7 +2296,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
@@ -2537,7 +2537,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"history": {
"advanced": false,
@@ -2708,7 +2708,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from typing import Any, cast\n\nfrom langflow.custom import Component\nfrom langflow.inputs import HandleInput\nfrom langflow.io import DropdownInput, IntInput, MessageTextInput, MultilineInput, Output, TabInput\nfrom langflow.memory import aget_messages, astore_message\nfrom langflow.schema import Data, dotdict\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.utils.component_utils import set_current_fields, set_field_display\nfrom langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_AI, MESSAGE_SENDER_USER\n\n\nclass MemoryComponent(Component):\n display_name = \"Message History\"\n description = \"Stores or retrieves stored chat messages from Langflow tables or an external memory.\"\n icon = \"message-square-more\"\n name = \"Memory\"\n default_keys = [\"mode\", \"memory\"]\n mode_config = {\n \"Store\": [\"message\", \"memory\", \"sender\", \"sender_name\", \"session_id\"],\n \"Retrieve\": [\"n_messages\", \"order\", \"template\", \"memory\"],\n }\n\n inputs = [\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Retrieve\", \"Store\"],\n value=\"Retrieve\",\n info=\"Operation mode: Store messages or Retrieve messages.\",\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"message\",\n display_name=\"Message\",\n info=\"The chat message to be stored.\",\n tool_mode=True,\n dynamic=True,\n show=False,\n ),\n HandleInput(\n name=\"memory\",\n display_name=\"External Memory\",\n input_types=[\"Memory\"],\n info=\"Retrieve messages from an external memory. If empty, it will use the Langflow tables.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER, \"Machine and User\"],\n value=\"Machine and User\",\n info=\"Filter by sender type.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Filter by sender name.\",\n advanced=True,\n show=False,\n ),\n IntInput(\n name=\"n_messages\",\n display_name=\"Number of Messages\",\n value=100,\n info=\"Number of messages to retrieve.\",\n advanced=True,\n show=False,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"order\",\n display_name=\"Order\",\n options=[\"Ascending\", \"Descending\"],\n value=\"Ascending\",\n info=\"Order of the messages.\",\n advanced=True,\n tool_mode=True,\n required=True,\n show=False,\n ),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. 
\"\n \"It can contain the keys {text}, {sender} or any other key in the message data.\",\n value=\"{sender_name}: {text}\",\n advanced=True,\n show=False,\n ),\n ]\n\n outputs = [Output(display_name=\"Messages\", name=\"dataframe\", method=\"retrieve_messages_dataframe\", dynamic=True)]\n\n def update_outputs(self, frontend_node: dict, field_name: str, field_value: Any) -> dict:\n \"\"\"Dynamically show only the relevant output based on the selected output type.\"\"\"\n if field_name == \"mode\":\n # Start with empty outputs\n frontend_node[\"outputs\"] = []\n if field_value == \"Store\":\n frontend_node[\"outputs\"] = [\n Output(\n display_name=\"Stored Messages\",\n name=\"stored_messages\",\n method=\"store_message\",\n hidden=True,\n dynamic=True,\n )\n ]\n if field_value == \"Retrieve\":\n frontend_node[\"outputs\"] = [\n Output(\n display_name=\"Messages\", name=\"dataframe\", method=\"retrieve_messages_dataframe\", dynamic=True\n )\n ]\n return frontend_node\n\n async def retrieve_messages(self) -> Data:\n sender = self.sender\n sender_name = self.sender_name\n session_id = self.session_id\n n_messages = self.n_messages\n order = \"DESC\" if self.order == \"Descending\" else \"ASC\"\n\n if sender == \"Machine and User\":\n sender = None\n\n if self.memory and not hasattr(self.memory, \"aget_messages\"):\n memory_name = type(self.memory).__name__\n err_msg = f\"External Memory object ({memory_name}) must have 'aget_messages' method.\"\n raise AttributeError(err_msg)\n # Check if n_messages is None or 0\n if n_messages == 0:\n stored = []\n elif self.memory:\n # override session_id\n self.memory.session_id = session_id\n\n stored = await self.memory.aget_messages()\n # langchain memories are supposed to return messages in ascending order\n if order == \"DESC\":\n stored = stored[::-1]\n if n_messages:\n stored = stored[:n_messages]\n stored = [Message.from_lc_message(m) for m in stored]\n if sender:\n expected_type = MESSAGE_SENDER_AI if sender == MESSAGE_SENDER_AI else MESSAGE_SENDER_USER\n stored = [m for m in stored if m.type == expected_type]\n else:\n stored = await aget_messages(\n sender=sender,\n sender_name=sender_name,\n session_id=session_id,\n limit=n_messages,\n order=order,\n )\n self.status = stored\n return cast(Data, stored)\n\n async def retrieve_messages_dataframe(self) -> DataFrame:\n \"\"\"Convert the retrieved messages into a DataFrame.\n\n Returns:\n DataFrame: A DataFrame containing the message data.\n \"\"\"\n messages = await self.retrieve_messages()\n return DataFrame(messages)\n\n async def store_message(self) -> Message:\n message = Message(text=self.message) if isinstance(self.message, str) else self.message\n\n message.session_id = self.session_id or message.session_id\n message.sender = self.sender or message.sender or MESSAGE_SENDER_AI\n message.sender_name = self.sender_name or message.sender_name or MESSAGE_SENDER_NAME_AI\n\n stored_messages: list[Message] = []\n\n if self.memory:\n self.memory.session_id = message.session_id\n lc_message = message.to_lc_message()\n await self.memory.aadd_messages([lc_message])\n\n stored_messages = await self.memory.aget_messages() or []\n\n stored_messages = [Message.from_lc_message(m) for m in stored_messages] if stored_messages else []\n\n if message.sender:\n stored_messages = [m for m in stored_messages if m.sender == message.sender]\n else:\n await astore_message(message, flow_id=self.graph.flow_id)\n stored_messages = (\n await aget_messages(\n session_id=message.session_id, sender_name=message.sender_name, 
sender=message.sender\n )\n or []\n )\n\n if not stored_messages:\n msg = \"No messages were stored. Please ensure that the session ID and sender are properly set.\"\n raise ValueError(msg)\n\n stored_message = stored_messages[0]\n self.status = stored_message\n return stored_message\n\n def update_build_config(\n self,\n build_config: dotdict,\n field_value: Any, # noqa: ARG002\n field_name: str | None = None, # noqa: ARG002\n ) -> dotdict:\n return set_current_fields(\n build_config=build_config,\n action_fields=self.mode_config,\n selected_action=build_config[\"mode\"][\"value\"],\n default_fields=self.default_keys,\n func=set_field_display,\n )\n"
+ "value": "from typing import Any, cast\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DropdownInput, HandleInput, IntInput, MessageTextInput, MultilineInput, TabInput\nfrom langflow.memory import aget_messages, astore_message\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\nfrom langflow.template.field.base import Output\nfrom langflow.utils.component_utils import set_current_fields, set_field_display\nfrom langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_AI, MESSAGE_SENDER_USER\n\n\nclass MemoryComponent(Component):\n display_name = \"Message History\"\n description = \"Stores or retrieves stored chat messages from Langflow tables or an external memory.\"\n icon = \"message-square-more\"\n name = \"Memory\"\n default_keys = [\"mode\", \"memory\"]\n mode_config = {\n \"Store\": [\"message\", \"memory\", \"sender\", \"sender_name\", \"session_id\"],\n \"Retrieve\": [\"n_messages\", \"order\", \"template\", \"memory\"],\n }\n\n inputs = [\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Retrieve\", \"Store\"],\n value=\"Retrieve\",\n info=\"Operation mode: Store messages or Retrieve messages.\",\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"message\",\n display_name=\"Message\",\n info=\"The chat message to be stored.\",\n tool_mode=True,\n dynamic=True,\n show=False,\n ),\n HandleInput(\n name=\"memory\",\n display_name=\"External Memory\",\n input_types=[\"Memory\"],\n info=\"Retrieve messages from an external memory. If empty, it will use the Langflow tables.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER, \"Machine and User\"],\n value=\"Machine and User\",\n info=\"Filter by sender type.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Filter by sender name.\",\n advanced=True,\n show=False,\n ),\n IntInput(\n name=\"n_messages\",\n display_name=\"Number of Messages\",\n value=100,\n info=\"Number of messages to retrieve.\",\n advanced=True,\n show=False,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"order\",\n display_name=\"Order\",\n options=[\"Ascending\", \"Descending\"],\n value=\"Ascending\",\n info=\"Order of the messages.\",\n advanced=True,\n tool_mode=True,\n required=True,\n show=False,\n ),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. 
\"\n \"It can contain the keys {text}, {sender} or any other key in the message data.\",\n value=\"{sender_name}: {text}\",\n advanced=True,\n show=False,\n ),\n ]\n\n outputs = [Output(display_name=\"Messages\", name=\"dataframe\", method=\"retrieve_messages_dataframe\", dynamic=True)]\n\n def update_outputs(self, frontend_node: dict, field_name: str, field_value: Any) -> dict:\n \"\"\"Dynamically show only the relevant output based on the selected output type.\"\"\"\n if field_name == \"mode\":\n # Start with empty outputs\n frontend_node[\"outputs\"] = []\n if field_value == \"Store\":\n frontend_node[\"outputs\"] = [\n Output(\n display_name=\"Stored Messages\",\n name=\"stored_messages\",\n method=\"store_message\",\n hidden=True,\n dynamic=True,\n )\n ]\n if field_value == \"Retrieve\":\n frontend_node[\"outputs\"] = [\n Output(\n display_name=\"Messages\", name=\"dataframe\", method=\"retrieve_messages_dataframe\", dynamic=True\n )\n ]\n return frontend_node\n\n async def retrieve_messages(self) -> Data:\n sender = self.sender\n sender_name = self.sender_name\n session_id = self.session_id\n n_messages = self.n_messages\n order = \"DESC\" if self.order == \"Descending\" else \"ASC\"\n\n if sender == \"Machine and User\":\n sender = None\n\n if self.memory and not hasattr(self.memory, \"aget_messages\"):\n memory_name = type(self.memory).__name__\n err_msg = f\"External Memory object ({memory_name}) must have 'aget_messages' method.\"\n raise AttributeError(err_msg)\n # Check if n_messages is None or 0\n if n_messages == 0:\n stored = []\n elif self.memory:\n # override session_id\n self.memory.session_id = session_id\n\n stored = await self.memory.aget_messages()\n # langchain memories are supposed to return messages in ascending order\n if order == \"DESC\":\n stored = stored[::-1]\n if n_messages:\n stored = stored[:n_messages]\n stored = [Message.from_lc_message(m) for m in stored]\n if sender:\n expected_type = MESSAGE_SENDER_AI if sender == MESSAGE_SENDER_AI else MESSAGE_SENDER_USER\n stored = [m for m in stored if m.type == expected_type]\n else:\n stored = await aget_messages(\n sender=sender,\n sender_name=sender_name,\n session_id=session_id,\n limit=n_messages,\n order=order,\n )\n self.status = stored\n return cast(Data, stored)\n\n async def retrieve_messages_dataframe(self) -> DataFrame:\n \"\"\"Convert the retrieved messages into a DataFrame.\n\n Returns:\n DataFrame: A DataFrame containing the message data.\n \"\"\"\n messages = await self.retrieve_messages()\n return DataFrame(messages)\n\n async def store_message(self) -> Message:\n message = Message(text=self.message) if isinstance(self.message, str) else self.message\n\n message.session_id = self.session_id or message.session_id\n message.sender = self.sender or message.sender or MESSAGE_SENDER_AI\n message.sender_name = self.sender_name or message.sender_name or MESSAGE_SENDER_NAME_AI\n\n stored_messages: list[Message] = []\n\n if self.memory:\n self.memory.session_id = message.session_id\n lc_message = message.to_lc_message()\n await self.memory.aadd_messages([lc_message])\n\n stored_messages = await self.memory.aget_messages() or []\n\n stored_messages = [Message.from_lc_message(m) for m in stored_messages] if stored_messages else []\n\n if message.sender:\n stored_messages = [m for m in stored_messages if m.sender == message.sender]\n else:\n await astore_message(message, flow_id=self.graph.flow_id)\n stored_messages = (\n await aget_messages(\n session_id=message.session_id, sender_name=message.sender_name, 
sender=message.sender\n )\n or []\n )\n\n if not stored_messages:\n msg = \"No messages were stored. Please ensure that the session ID and sender are properly set.\"\n raise ValueError(msg)\n\n stored_message = stored_messages[0]\n self.status = stored_message\n return stored_message\n\n def update_build_config(\n self,\n build_config: dotdict,\n field_value: Any, # noqa: ARG002\n field_name: str | None = None, # noqa: ARG002\n ) -> dotdict:\n return set_current_fields(\n build_config=build_config,\n action_fields=self.mode_config,\n selected_action=build_config[\"mode\"][\"value\"],\n default_fields=self.default_keys,\n func=set_field_display,\n )\n"
},
"memory": {
"_input_type": "HandleInput",
@@ -3038,7 +3038,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
+ "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"_input_type": "FileInput",
@@ -3508,7 +3508,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from pathlib import Path\n\nimport assemblyai as aai\nfrom loguru import logger\n\nfrom langflow.custom import Component\nfrom langflow.io import BoolInput, DropdownInput, FileInput, MessageTextInput, Output, SecretStrInput\nfrom langflow.schema import Data\n\n\nclass AssemblyAITranscriptionJobCreator(Component):\n display_name = \"AssemblyAI Start Transcript\"\n description = \"Create a transcription job for an audio file using AssemblyAI with advanced options\"\n documentation = \"https://www.assemblyai.com/docs\"\n icon = \"AssemblyAI\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Assembly API Key\",\n info=\"Your AssemblyAI API key. You can get one from https://www.assemblyai.com/\",\n required=True,\n ),\n FileInput(\n name=\"audio_file\",\n display_name=\"Audio File\",\n file_types=[\n \"3ga\",\n \"8svx\",\n \"aac\",\n \"ac3\",\n \"aif\",\n \"aiff\",\n \"alac\",\n \"amr\",\n \"ape\",\n \"au\",\n \"dss\",\n \"flac\",\n \"flv\",\n \"m4a\",\n \"m4b\",\n \"m4p\",\n \"m4r\",\n \"mp3\",\n \"mpga\",\n \"ogg\",\n \"oga\",\n \"mogg\",\n \"opus\",\n \"qcp\",\n \"tta\",\n \"voc\",\n \"wav\",\n \"wma\",\n \"wv\",\n \"webm\",\n \"mts\",\n \"m2ts\",\n \"ts\",\n \"mov\",\n \"mp2\",\n \"mp4\",\n \"m4p\",\n \"m4v\",\n \"mxf\",\n ],\n info=\"The audio file to transcribe\",\n required=True,\n ),\n MessageTextInput(\n name=\"audio_file_url\",\n display_name=\"Audio File URL\",\n info=\"The URL of the audio file to transcribe (Can be used instead of a File)\",\n advanced=True,\n ),\n DropdownInput(\n name=\"speech_model\",\n display_name=\"Speech Model\",\n options=[\n \"best\",\n \"nano\",\n ],\n value=\"best\",\n info=\"The speech model to use for the transcription\",\n advanced=True,\n ),\n BoolInput(\n name=\"language_detection\",\n display_name=\"Automatic Language Detection\",\n info=\"Enable automatic language detection\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"language_code\",\n display_name=\"Language\",\n info=(\n \"\"\"\n The language of the audio file. 
Can be set manually if automatic language detection is disabled.\n See https://www.assemblyai.com/docs/getting-started/supported-languages \"\"\"\n \"for a list of supported language codes.\"\n ),\n advanced=True,\n ),\n BoolInput(\n name=\"speaker_labels\",\n display_name=\"Enable Speaker Labels\",\n info=\"Enable speaker diarization\",\n ),\n MessageTextInput(\n name=\"speakers_expected\",\n display_name=\"Expected Number of Speakers\",\n info=\"Set the expected number of speakers (optional, enter a number)\",\n advanced=True,\n ),\n BoolInput(\n name=\"punctuate\",\n display_name=\"Punctuate\",\n info=\"Enable automatic punctuation\",\n advanced=True,\n value=True,\n ),\n BoolInput(\n name=\"format_text\",\n display_name=\"Format Text\",\n info=\"Enable text formatting\",\n advanced=True,\n value=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Transcript ID\", name=\"transcript_id\", method=\"create_transcription_job\"),\n ]\n\n def create_transcription_job(self) -> Data:\n aai.settings.api_key = self.api_key\n\n # Convert speakers_expected to int if it's not empty\n speakers_expected = None\n if self.speakers_expected and self.speakers_expected.strip():\n try:\n speakers_expected = int(self.speakers_expected)\n except ValueError:\n self.status = \"Error: Expected Number of Speakers must be a valid integer\"\n return Data(data={\"error\": \"Error: Expected Number of Speakers must be a valid integer\"})\n\n language_code = self.language_code or None\n\n config = aai.TranscriptionConfig(\n speech_model=self.speech_model,\n language_detection=self.language_detection,\n language_code=language_code,\n speaker_labels=self.speaker_labels,\n speakers_expected=speakers_expected,\n punctuate=self.punctuate,\n format_text=self.format_text,\n )\n\n audio = None\n if self.audio_file:\n if self.audio_file_url:\n logger.warning(\"Both an audio file an audio URL were specified. The audio URL was ignored.\")\n\n # Check if the file exists\n if not Path(self.audio_file).exists():\n self.status = \"Error: Audio file not found\"\n return Data(data={\"error\": \"Error: Audio file not found\"})\n audio = self.audio_file\n elif self.audio_file_url:\n audio = self.audio_file_url\n else:\n self.status = \"Error: Either an audio file or an audio URL must be specified\"\n return Data(data={\"error\": \"Error: Either an audio file or an audio URL must be specified\"})\n\n try:\n transcript = aai.Transcriber().submit(audio, config=config)\n except Exception as e: # noqa: BLE001\n logger.opt(exception=True).debug(\"Error submitting transcription job\")\n self.status = f\"An error occurred: {e}\"\n return Data(data={\"error\": f\"An error occurred: {e}\"})\n\n if transcript.error:\n self.status = transcript.error\n return Data(data={\"error\": transcript.error})\n result = Data(data={\"transcript_id\": transcript.id})\n self.status = result\n return result\n"
+ "value": "from pathlib import Path\n\nimport assemblyai as aai\nfrom loguru import logger\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.io import BoolInput, DropdownInput, FileInput, MessageTextInput, Output, SecretStrInput\nfrom langflow.schema.data import Data\n\n\nclass AssemblyAITranscriptionJobCreator(Component):\n display_name = \"AssemblyAI Start Transcript\"\n description = \"Create a transcription job for an audio file using AssemblyAI with advanced options\"\n documentation = \"https://www.assemblyai.com/docs\"\n icon = \"AssemblyAI\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Assembly API Key\",\n info=\"Your AssemblyAI API key. You can get one from https://www.assemblyai.com/\",\n required=True,\n ),\n FileInput(\n name=\"audio_file\",\n display_name=\"Audio File\",\n file_types=[\n \"3ga\",\n \"8svx\",\n \"aac\",\n \"ac3\",\n \"aif\",\n \"aiff\",\n \"alac\",\n \"amr\",\n \"ape\",\n \"au\",\n \"dss\",\n \"flac\",\n \"flv\",\n \"m4a\",\n \"m4b\",\n \"m4p\",\n \"m4r\",\n \"mp3\",\n \"mpga\",\n \"ogg\",\n \"oga\",\n \"mogg\",\n \"opus\",\n \"qcp\",\n \"tta\",\n \"voc\",\n \"wav\",\n \"wma\",\n \"wv\",\n \"webm\",\n \"mts\",\n \"m2ts\",\n \"ts\",\n \"mov\",\n \"mp2\",\n \"mp4\",\n \"m4p\",\n \"m4v\",\n \"mxf\",\n ],\n info=\"The audio file to transcribe\",\n required=True,\n ),\n MessageTextInput(\n name=\"audio_file_url\",\n display_name=\"Audio File URL\",\n info=\"The URL of the audio file to transcribe (Can be used instead of a File)\",\n advanced=True,\n ),\n DropdownInput(\n name=\"speech_model\",\n display_name=\"Speech Model\",\n options=[\n \"best\",\n \"nano\",\n ],\n value=\"best\",\n info=\"The speech model to use for the transcription\",\n advanced=True,\n ),\n BoolInput(\n name=\"language_detection\",\n display_name=\"Automatic Language Detection\",\n info=\"Enable automatic language detection\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"language_code\",\n display_name=\"Language\",\n info=(\n \"\"\"\n The language of the audio file. 
Can be set manually if automatic language detection is disabled.\n See https://www.assemblyai.com/docs/getting-started/supported-languages \"\"\"\n \"for a list of supported language codes.\"\n ),\n advanced=True,\n ),\n BoolInput(\n name=\"speaker_labels\",\n display_name=\"Enable Speaker Labels\",\n info=\"Enable speaker diarization\",\n ),\n MessageTextInput(\n name=\"speakers_expected\",\n display_name=\"Expected Number of Speakers\",\n info=\"Set the expected number of speakers (optional, enter a number)\",\n advanced=True,\n ),\n BoolInput(\n name=\"punctuate\",\n display_name=\"Punctuate\",\n info=\"Enable automatic punctuation\",\n advanced=True,\n value=True,\n ),\n BoolInput(\n name=\"format_text\",\n display_name=\"Format Text\",\n info=\"Enable text formatting\",\n advanced=True,\n value=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Transcript ID\", name=\"transcript_id\", method=\"create_transcription_job\"),\n ]\n\n def create_transcription_job(self) -> Data:\n aai.settings.api_key = self.api_key\n\n # Convert speakers_expected to int if it's not empty\n speakers_expected = None\n if self.speakers_expected and self.speakers_expected.strip():\n try:\n speakers_expected = int(self.speakers_expected)\n except ValueError:\n self.status = \"Error: Expected Number of Speakers must be a valid integer\"\n return Data(data={\"error\": \"Error: Expected Number of Speakers must be a valid integer\"})\n\n language_code = self.language_code or None\n\n config = aai.TranscriptionConfig(\n speech_model=self.speech_model,\n language_detection=self.language_detection,\n language_code=language_code,\n speaker_labels=self.speaker_labels,\n speakers_expected=speakers_expected,\n punctuate=self.punctuate,\n format_text=self.format_text,\n )\n\n audio = None\n if self.audio_file:\n if self.audio_file_url:\n logger.warning(\"Both an audio file an audio URL were specified. The audio URL was ignored.\")\n\n # Check if the file exists\n if not Path(self.audio_file).exists():\n self.status = \"Error: Audio file not found\"\n return Data(data={\"error\": \"Error: Audio file not found\"})\n audio = self.audio_file\n elif self.audio_file_url:\n audio = self.audio_file_url\n else:\n self.status = \"Error: Either an audio file or an audio URL must be specified\"\n return Data(data={\"error\": \"Error: Either an audio file or an audio URL must be specified\"})\n\n try:\n transcript = aai.Transcriber().submit(audio, config=config)\n except Exception as e: # noqa: BLE001\n logger.opt(exception=True).debug(\"Error submitting transcription job\")\n self.status = f\"An error occurred: {e}\"\n return Data(data={\"error\": f\"An error occurred: {e}\"})\n\n if transcript.error:\n self.status = transcript.error\n return Data(data={\"error\": transcript.error})\n result = Data(data={\"transcript_id\": transcript.id})\n self.status = result\n return result\n"
},
"format_text": {
"_input_type": "BoolInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Memory Chatbot.json b/src/backend/base/langflow/initial_setup/starter_projects/Memory Chatbot.json
index dbff1c17e..f73557d20 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Memory Chatbot.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Memory Chatbot.json
@@ -255,7 +255,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
+ "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"_input_type": "FileInput",
@@ -578,7 +578,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
@@ -887,7 +887,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"memory": {
"advanced": false,
@@ -1089,7 +1089,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
+ "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
},
"input_value": {
"_input_type": "MessageInput",
@@ -1561,7 +1561,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from typing import Any, cast\n\nfrom langflow.custom import Component\nfrom langflow.inputs import HandleInput\nfrom langflow.io import DropdownInput, IntInput, MessageTextInput, MultilineInput, Output, TabInput\nfrom langflow.memory import aget_messages, astore_message\nfrom langflow.schema import Data, dotdict\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.utils.component_utils import set_current_fields, set_field_display\nfrom langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_AI, MESSAGE_SENDER_USER\n\n\nclass MemoryComponent(Component):\n display_name = \"Message History\"\n description = \"Stores or retrieves stored chat messages from Langflow tables or an external memory.\"\n icon = \"message-square-more\"\n name = \"Memory\"\n default_keys = [\"mode\", \"memory\"]\n mode_config = {\n \"Store\": [\"message\", \"memory\", \"sender\", \"sender_name\", \"session_id\"],\n \"Retrieve\": [\"n_messages\", \"order\", \"template\", \"memory\"],\n }\n\n inputs = [\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Retrieve\", \"Store\"],\n value=\"Retrieve\",\n info=\"Operation mode: Store messages or Retrieve messages.\",\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"message\",\n display_name=\"Message\",\n info=\"The chat message to be stored.\",\n tool_mode=True,\n dynamic=True,\n show=False,\n ),\n HandleInput(\n name=\"memory\",\n display_name=\"External Memory\",\n input_types=[\"Memory\"],\n info=\"Retrieve messages from an external memory. If empty, it will use the Langflow tables.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER, \"Machine and User\"],\n value=\"Machine and User\",\n info=\"Filter by sender type.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Filter by sender name.\",\n advanced=True,\n show=False,\n ),\n IntInput(\n name=\"n_messages\",\n display_name=\"Number of Messages\",\n value=100,\n info=\"Number of messages to retrieve.\",\n advanced=True,\n show=False,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"order\",\n display_name=\"Order\",\n options=[\"Ascending\", \"Descending\"],\n value=\"Ascending\",\n info=\"Order of the messages.\",\n advanced=True,\n tool_mode=True,\n required=True,\n show=False,\n ),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. 
\"\n \"It can contain the keys {text}, {sender} or any other key in the message data.\",\n value=\"{sender_name}: {text}\",\n advanced=True,\n show=False,\n ),\n ]\n\n outputs = [Output(display_name=\"Messages\", name=\"dataframe\", method=\"retrieve_messages_dataframe\", dynamic=True)]\n\n def update_outputs(self, frontend_node: dict, field_name: str, field_value: Any) -> dict:\n \"\"\"Dynamically show only the relevant output based on the selected output type.\"\"\"\n if field_name == \"mode\":\n # Start with empty outputs\n frontend_node[\"outputs\"] = []\n if field_value == \"Store\":\n frontend_node[\"outputs\"] = [\n Output(\n display_name=\"Stored Messages\",\n name=\"stored_messages\",\n method=\"store_message\",\n hidden=True,\n dynamic=True,\n )\n ]\n if field_value == \"Retrieve\":\n frontend_node[\"outputs\"] = [\n Output(\n display_name=\"Messages\", name=\"dataframe\", method=\"retrieve_messages_dataframe\", dynamic=True\n )\n ]\n return frontend_node\n\n async def retrieve_messages(self) -> Data:\n sender = self.sender\n sender_name = self.sender_name\n session_id = self.session_id\n n_messages = self.n_messages\n order = \"DESC\" if self.order == \"Descending\" else \"ASC\"\n\n if sender == \"Machine and User\":\n sender = None\n\n if self.memory and not hasattr(self.memory, \"aget_messages\"):\n memory_name = type(self.memory).__name__\n err_msg = f\"External Memory object ({memory_name}) must have 'aget_messages' method.\"\n raise AttributeError(err_msg)\n # Check if n_messages is None or 0\n if n_messages == 0:\n stored = []\n elif self.memory:\n # override session_id\n self.memory.session_id = session_id\n\n stored = await self.memory.aget_messages()\n # langchain memories are supposed to return messages in ascending order\n if order == \"DESC\":\n stored = stored[::-1]\n if n_messages:\n stored = stored[:n_messages]\n stored = [Message.from_lc_message(m) for m in stored]\n if sender:\n expected_type = MESSAGE_SENDER_AI if sender == MESSAGE_SENDER_AI else MESSAGE_SENDER_USER\n stored = [m for m in stored if m.type == expected_type]\n else:\n stored = await aget_messages(\n sender=sender,\n sender_name=sender_name,\n session_id=session_id,\n limit=n_messages,\n order=order,\n )\n self.status = stored\n return cast(Data, stored)\n\n async def retrieve_messages_dataframe(self) -> DataFrame:\n \"\"\"Convert the retrieved messages into a DataFrame.\n\n Returns:\n DataFrame: A DataFrame containing the message data.\n \"\"\"\n messages = await self.retrieve_messages()\n return DataFrame(messages)\n\n async def store_message(self) -> Message:\n message = Message(text=self.message) if isinstance(self.message, str) else self.message\n\n message.session_id = self.session_id or message.session_id\n message.sender = self.sender or message.sender or MESSAGE_SENDER_AI\n message.sender_name = self.sender_name or message.sender_name or MESSAGE_SENDER_NAME_AI\n\n stored_messages: list[Message] = []\n\n if self.memory:\n self.memory.session_id = message.session_id\n lc_message = message.to_lc_message()\n await self.memory.aadd_messages([lc_message])\n\n stored_messages = await self.memory.aget_messages() or []\n\n stored_messages = [Message.from_lc_message(m) for m in stored_messages] if stored_messages else []\n\n if message.sender:\n stored_messages = [m for m in stored_messages if m.sender == message.sender]\n else:\n await astore_message(message, flow_id=self.graph.flow_id)\n stored_messages = (\n await aget_messages(\n session_id=message.session_id, sender_name=message.sender_name, 
sender=message.sender\n )\n or []\n )\n\n if not stored_messages:\n msg = \"No messages were stored. Please ensure that the session ID and sender are properly set.\"\n raise ValueError(msg)\n\n stored_message = stored_messages[0]\n self.status = stored_message\n return stored_message\n\n def update_build_config(\n self,\n build_config: dotdict,\n field_value: Any, # noqa: ARG002\n field_name: str | None = None, # noqa: ARG002\n ) -> dotdict:\n return set_current_fields(\n build_config=build_config,\n action_fields=self.mode_config,\n selected_action=build_config[\"mode\"][\"value\"],\n default_fields=self.default_keys,\n func=set_field_display,\n )\n"
+ "value": "from typing import Any, cast\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DropdownInput, HandleInput, IntInput, MessageTextInput, MultilineInput, TabInput\nfrom langflow.memory import aget_messages, astore_message\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\nfrom langflow.template.field.base import Output\nfrom langflow.utils.component_utils import set_current_fields, set_field_display\nfrom langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_AI, MESSAGE_SENDER_USER\n\n\nclass MemoryComponent(Component):\n display_name = \"Message History\"\n description = \"Stores or retrieves stored chat messages from Langflow tables or an external memory.\"\n icon = \"message-square-more\"\n name = \"Memory\"\n default_keys = [\"mode\", \"memory\"]\n mode_config = {\n \"Store\": [\"message\", \"memory\", \"sender\", \"sender_name\", \"session_id\"],\n \"Retrieve\": [\"n_messages\", \"order\", \"template\", \"memory\"],\n }\n\n inputs = [\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Retrieve\", \"Store\"],\n value=\"Retrieve\",\n info=\"Operation mode: Store messages or Retrieve messages.\",\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"message\",\n display_name=\"Message\",\n info=\"The chat message to be stored.\",\n tool_mode=True,\n dynamic=True,\n show=False,\n ),\n HandleInput(\n name=\"memory\",\n display_name=\"External Memory\",\n input_types=[\"Memory\"],\n info=\"Retrieve messages from an external memory. If empty, it will use the Langflow tables.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER, \"Machine and User\"],\n value=\"Machine and User\",\n info=\"Filter by sender type.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Filter by sender name.\",\n advanced=True,\n show=False,\n ),\n IntInput(\n name=\"n_messages\",\n display_name=\"Number of Messages\",\n value=100,\n info=\"Number of messages to retrieve.\",\n advanced=True,\n show=False,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"order\",\n display_name=\"Order\",\n options=[\"Ascending\", \"Descending\"],\n value=\"Ascending\",\n info=\"Order of the messages.\",\n advanced=True,\n tool_mode=True,\n required=True,\n show=False,\n ),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. 
\"\n \"It can contain the keys {text}, {sender} or any other key in the message data.\",\n value=\"{sender_name}: {text}\",\n advanced=True,\n show=False,\n ),\n ]\n\n outputs = [Output(display_name=\"Messages\", name=\"dataframe\", method=\"retrieve_messages_dataframe\", dynamic=True)]\n\n def update_outputs(self, frontend_node: dict, field_name: str, field_value: Any) -> dict:\n \"\"\"Dynamically show only the relevant output based on the selected output type.\"\"\"\n if field_name == \"mode\":\n # Start with empty outputs\n frontend_node[\"outputs\"] = []\n if field_value == \"Store\":\n frontend_node[\"outputs\"] = [\n Output(\n display_name=\"Stored Messages\",\n name=\"stored_messages\",\n method=\"store_message\",\n hidden=True,\n dynamic=True,\n )\n ]\n if field_value == \"Retrieve\":\n frontend_node[\"outputs\"] = [\n Output(\n display_name=\"Messages\", name=\"dataframe\", method=\"retrieve_messages_dataframe\", dynamic=True\n )\n ]\n return frontend_node\n\n async def retrieve_messages(self) -> Data:\n sender = self.sender\n sender_name = self.sender_name\n session_id = self.session_id\n n_messages = self.n_messages\n order = \"DESC\" if self.order == \"Descending\" else \"ASC\"\n\n if sender == \"Machine and User\":\n sender = None\n\n if self.memory and not hasattr(self.memory, \"aget_messages\"):\n memory_name = type(self.memory).__name__\n err_msg = f\"External Memory object ({memory_name}) must have 'aget_messages' method.\"\n raise AttributeError(err_msg)\n # Check if n_messages is None or 0\n if n_messages == 0:\n stored = []\n elif self.memory:\n # override session_id\n self.memory.session_id = session_id\n\n stored = await self.memory.aget_messages()\n # langchain memories are supposed to return messages in ascending order\n if order == \"DESC\":\n stored = stored[::-1]\n if n_messages:\n stored = stored[:n_messages]\n stored = [Message.from_lc_message(m) for m in stored]\n if sender:\n expected_type = MESSAGE_SENDER_AI if sender == MESSAGE_SENDER_AI else MESSAGE_SENDER_USER\n stored = [m for m in stored if m.type == expected_type]\n else:\n stored = await aget_messages(\n sender=sender,\n sender_name=sender_name,\n session_id=session_id,\n limit=n_messages,\n order=order,\n )\n self.status = stored\n return cast(Data, stored)\n\n async def retrieve_messages_dataframe(self) -> DataFrame:\n \"\"\"Convert the retrieved messages into a DataFrame.\n\n Returns:\n DataFrame: A DataFrame containing the message data.\n \"\"\"\n messages = await self.retrieve_messages()\n return DataFrame(messages)\n\n async def store_message(self) -> Message:\n message = Message(text=self.message) if isinstance(self.message, str) else self.message\n\n message.session_id = self.session_id or message.session_id\n message.sender = self.sender or message.sender or MESSAGE_SENDER_AI\n message.sender_name = self.sender_name or message.sender_name or MESSAGE_SENDER_NAME_AI\n\n stored_messages: list[Message] = []\n\n if self.memory:\n self.memory.session_id = message.session_id\n lc_message = message.to_lc_message()\n await self.memory.aadd_messages([lc_message])\n\n stored_messages = await self.memory.aget_messages() or []\n\n stored_messages = [Message.from_lc_message(m) for m in stored_messages] if stored_messages else []\n\n if message.sender:\n stored_messages = [m for m in stored_messages if m.sender == message.sender]\n else:\n await astore_message(message, flow_id=self.graph.flow_id)\n stored_messages = (\n await aget_messages(\n session_id=message.session_id, sender_name=message.sender_name, 
sender=message.sender\n )\n or []\n )\n\n if not stored_messages:\n msg = \"No messages were stored. Please ensure that the session ID and sender are properly set.\"\n raise ValueError(msg)\n\n stored_message = stored_messages[0]\n self.status = stored_message\n return stored_message\n\n def update_build_config(\n self,\n build_config: dotdict,\n field_value: Any, # noqa: ARG002\n field_name: str | None = None, # noqa: ARG002\n ) -> dotdict:\n return set_current_fields(\n build_config=build_config,\n action_fields=self.mode_config,\n selected_action=build_config[\"mode\"][\"value\"],\n default_fields=self.default_keys,\n func=set_field_display,\n )\n"
},
"memory": {
"_input_type": "HandleInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/News Aggregator.json b/src/backend/base/langflow/initial_setup/starter_projects/News Aggregator.json
index 3f7263eb3..55d46889d 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/News Aggregator.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/News Aggregator.json
@@ -259,7 +259,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "import httpx\nfrom loguru import logger\n\nfrom langflow.custom import Component\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.io import (\n BoolInput,\n DropdownInput,\n IntInput,\n MessageTextInput,\n MultilineInput,\n Output,\n SecretStrInput,\n)\nfrom langflow.schema import Data\n\n\nclass AgentQL(Component):\n display_name = \"Extract Web Data\"\n description = \"Extracts structured data from a web page using an AgentQL query or a Natural Language description.\"\n documentation: str = \"https://docs.agentql.com/rest-api/api-reference\"\n icon = \"AgentQL\"\n name = \"AgentQL\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"API Key\",\n required=True,\n password=True,\n info=\"Your AgentQL API key from dev.agentql.com\",\n ),\n MessageTextInput(\n name=\"url\",\n display_name=\"URL\",\n required=True,\n info=\"The URL of the public web page you want to extract data from.\",\n tool_mode=True,\n ),\n MultilineInput(\n name=\"query\",\n display_name=\"AgentQL Query\",\n required=False,\n info=\"The AgentQL query to execute. Learn more at https://docs.agentql.com/agentql-query or use a prompt.\",\n tool_mode=True,\n ),\n MultilineInput(\n name=\"prompt\",\n display_name=\"Prompt\",\n required=False,\n info=\"A Natural Language description of the data to extract from the page. Alternative to AgentQL query.\",\n tool_mode=True,\n ),\n BoolInput(\n name=\"is_stealth_mode_enabled\",\n display_name=\"Enable Stealth Mode (Beta)\",\n info=\"Enable experimental anti-bot evasion strategies. May not work for all websites at all times.\",\n value=False,\n advanced=True,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"Seconds to wait for a request.\",\n value=900,\n advanced=True,\n ),\n DropdownInput(\n name=\"mode\",\n display_name=\"Request Mode\",\n info=\"'standard' uses deep data analysis, while 'fast' trades some depth of analysis for speed.\",\n options=[\"fast\", \"standard\"],\n value=\"fast\",\n advanced=True,\n ),\n IntInput(\n name=\"wait_for\",\n display_name=\"Wait For\",\n info=\"Seconds to wait for the page to load before extracting data.\",\n value=0,\n range_spec=RangeSpec(min=0, max=10, step_type=\"int\"),\n advanced=True,\n ),\n BoolInput(\n name=\"is_scroll_to_bottom_enabled\",\n display_name=\"Enable scroll to bottom\",\n info=\"Scroll to bottom of the page before extracting data.\",\n value=False,\n advanced=True,\n ),\n BoolInput(\n name=\"is_screenshot_enabled\",\n display_name=\"Enable screenshot\",\n info=\"Take a screenshot before extracting data. Returned in 'metadata' as a Base64 string.\",\n value=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Data\", name=\"data\", method=\"build_output\"),\n ]\n\n def build_output(self) -> Data:\n endpoint = \"https://api.agentql.com/v1/query-data\"\n headers = {\n \"X-API-Key\": self.api_key,\n \"Content-Type\": \"application/json\",\n \"X-TF-Request-Origin\": \"langflow\",\n }\n\n payload = {\n \"url\": self.url,\n \"query\": self.query,\n \"prompt\": self.prompt,\n \"params\": {\n \"mode\": self.mode,\n \"wait_for\": self.wait_for,\n \"is_scroll_to_bottom_enabled\": self.is_scroll_to_bottom_enabled,\n \"is_screenshot_enabled\": self.is_screenshot_enabled,\n },\n \"metadata\": {\n \"experimental_stealth_mode_enabled\": self.is_stealth_mode_enabled,\n },\n }\n\n if not self.prompt and not self.query:\n self.status = \"Either Query or Prompt must be provided.\"\n raise ValueError(self.status)\n if self.prompt and self.query:\n self.status = \"Both Query and Prompt can't be provided at the same time.\"\n raise ValueError(self.status)\n\n try:\n response = httpx.post(endpoint, headers=headers, json=payload, timeout=self.timeout)\n response.raise_for_status()\n\n json = response.json()\n data = Data(result=json[\"data\"], metadata=json[\"metadata\"])\n\n except httpx.HTTPStatusError as e:\n response = e.response\n if response.status_code == httpx.codes.UNAUTHORIZED:\n self.status = \"Please, provide a valid API Key. You can create one at https://dev.agentql.com.\"\n else:\n try:\n error_json = response.json()\n logger.error(\n f\"Failure response: '{response.status_code} {response.reason_phrase}' with body: {error_json}\"\n )\n msg = error_json[\"error_info\"] if \"error_info\" in error_json else error_json[\"detail\"]\n except (ValueError, TypeError):\n msg = f\"HTTP {e}.\"\n self.status = msg\n raise ValueError(self.status) from e\n\n else:\n self.status = data\n return data\n"
+ "value": "import httpx\nfrom loguru import logger\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.io import (\n BoolInput,\n DropdownInput,\n IntInput,\n MessageTextInput,\n MultilineInput,\n Output,\n SecretStrInput,\n)\nfrom langflow.schema.data import Data\n\n\nclass AgentQL(Component):\n display_name = \"Extract Web Data\"\n description = \"Extracts structured data from a web page using an AgentQL query or a Natural Language description.\"\n documentation: str = \"https://docs.agentql.com/rest-api/api-reference\"\n icon = \"AgentQL\"\n name = \"AgentQL\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"API Key\",\n required=True,\n password=True,\n info=\"Your AgentQL API key from dev.agentql.com\",\n ),\n MessageTextInput(\n name=\"url\",\n display_name=\"URL\",\n required=True,\n info=\"The URL of the public web page you want to extract data from.\",\n tool_mode=True,\n ),\n MultilineInput(\n name=\"query\",\n display_name=\"AgentQL Query\",\n required=False,\n info=\"The AgentQL query to execute. Learn more at https://docs.agentql.com/agentql-query or use a prompt.\",\n tool_mode=True,\n ),\n MultilineInput(\n name=\"prompt\",\n display_name=\"Prompt\",\n required=False,\n info=\"A Natural Language description of the data to extract from the page. Alternative to AgentQL query.\",\n tool_mode=True,\n ),\n BoolInput(\n name=\"is_stealth_mode_enabled\",\n display_name=\"Enable Stealth Mode (Beta)\",\n info=\"Enable experimental anti-bot evasion strategies. May not work for all websites at all times.\",\n value=False,\n advanced=True,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"Seconds to wait for a request.\",\n value=900,\n advanced=True,\n ),\n DropdownInput(\n name=\"mode\",\n display_name=\"Request Mode\",\n info=\"'standard' uses deep data analysis, while 'fast' trades some depth of analysis for speed.\",\n options=[\"fast\", \"standard\"],\n value=\"fast\",\n advanced=True,\n ),\n IntInput(\n name=\"wait_for\",\n display_name=\"Wait For\",\n info=\"Seconds to wait for the page to load before extracting data.\",\n value=0,\n range_spec=RangeSpec(min=0, max=10, step_type=\"int\"),\n advanced=True,\n ),\n BoolInput(\n name=\"is_scroll_to_bottom_enabled\",\n display_name=\"Enable scroll to bottom\",\n info=\"Scroll to bottom of the page before extracting data.\",\n value=False,\n advanced=True,\n ),\n BoolInput(\n name=\"is_screenshot_enabled\",\n display_name=\"Enable screenshot\",\n info=\"Take a screenshot before extracting data. Returned in 'metadata' as a Base64 string.\",\n value=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Data\", name=\"data\", method=\"build_output\"),\n ]\n\n def build_output(self) -> Data:\n endpoint = \"https://api.agentql.com/v1/query-data\"\n headers = {\n \"X-API-Key\": self.api_key,\n \"Content-Type\": \"application/json\",\n \"X-TF-Request-Origin\": \"langflow\",\n }\n\n payload = {\n \"url\": self.url,\n \"query\": self.query,\n \"prompt\": self.prompt,\n \"params\": {\n \"mode\": self.mode,\n \"wait_for\": self.wait_for,\n \"is_scroll_to_bottom_enabled\": self.is_scroll_to_bottom_enabled,\n \"is_screenshot_enabled\": self.is_screenshot_enabled,\n },\n \"metadata\": {\n \"experimental_stealth_mode_enabled\": self.is_stealth_mode_enabled,\n },\n }\n\n if not self.prompt and not self.query:\n self.status = \"Either Query or Prompt must be provided.\"\n raise ValueError(self.status)\n if self.prompt and self.query:\n self.status = \"Both Query and Prompt can't be provided at the same time.\"\n raise ValueError(self.status)\n\n try:\n response = httpx.post(endpoint, headers=headers, json=payload, timeout=self.timeout)\n response.raise_for_status()\n\n json = response.json()\n data = Data(result=json[\"data\"], metadata=json[\"metadata\"])\n\n except httpx.HTTPStatusError as e:\n response = e.response\n if response.status_code == httpx.codes.UNAUTHORIZED:\n self.status = \"Please, provide a valid API Key. You can create one at https://dev.agentql.com.\"\n else:\n try:\n error_json = response.json()\n logger.error(\n f\"Failure response: '{response.status_code} {response.reason_phrase}' with body: {error_json}\"\n )\n msg = error_json[\"error_info\"] if \"error_info\" in error_json else error_json[\"detail\"]\n except (ValueError, TypeError):\n msg = f\"HTTP {e}.\"\n self.status = msg\n raise ValueError(self.status) from e\n\n else:\n self.status = data\n return data\n"
},
"is_screenshot_enabled": {
"_input_type": "BoolInput",
@@ -693,7 +693,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
+ "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"_input_type": "FileInput",
@@ -1093,7 +1093,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
+ "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers.current_date import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
},
"handle_parsing_errors": {
"_input_type": "BoolInput",
@@ -1930,7 +1930,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Pokédex Agent.json b/src/backend/base/langflow/initial_setup/starter_projects/Pokédex Agent.json
index 9c51acb2e..877928cc2 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Pokédex Agent.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Pokédex Agent.json
@@ -202,7 +202,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
+ "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"_input_type": "FileInput",
@@ -533,7 +533,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
@@ -922,7 +922,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "import json\nimport re\nimport tempfile\nfrom datetime import datetime, timezone\nfrom pathlib import Path\nfrom typing import Any\nfrom urllib.parse import parse_qsl, urlencode, urlparse, urlunparse\n\nimport aiofiles\nimport aiofiles.os as aiofiles_os\nimport httpx\nimport validators\n\nfrom langflow.base.curl.parse import parse_context\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import TabInput\nfrom langflow.io import (\n BoolInput,\n DataInput,\n DropdownInput,\n IntInput,\n MessageTextInput,\n MultilineInput,\n Output,\n TableInput,\n)\nfrom langflow.schema import Data\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.services.deps import get_settings_service\nfrom langflow.utils.component_utils import set_current_fields, set_field_advanced, set_field_display\n\n# Define fields for each mode\nMODE_FIELDS = {\n \"URL\": [\n \"url_input\",\n \"method\",\n ],\n \"cURL\": [\"curl_input\"],\n}\n\n# Fields that should always be visible\nDEFAULT_FIELDS = [\"mode\"]\n\n\nclass APIRequestComponent(Component):\n display_name = \"API Request\"\n description = \"Make HTTP requests using URL or cURL commands.\"\n icon = \"Globe\"\n name = \"APIRequest\"\n\n inputs = [\n MessageTextInput(\n name=\"url_input\",\n display_name=\"URL\",\n info=\"Enter the URL for the request.\",\n advanced=False,\n tool_mode=True,\n ),\n MultilineInput(\n name=\"curl_input\",\n display_name=\"cURL\",\n info=(\n \"Paste a curl command to populate the fields. \"\n \"This will fill in the dictionary fields for headers and body.\"\n ),\n real_time_refresh=True,\n tool_mode=True,\n advanced=True,\n show=False,\n ),\n DropdownInput(\n name=\"method\",\n display_name=\"Method\",\n options=[\"GET\", \"POST\", \"PATCH\", \"PUT\", \"DELETE\"],\n value=\"GET\",\n info=\"The HTTP method to use.\",\n real_time_refresh=True,\n ),\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"URL\", \"cURL\"],\n value=\"URL\",\n info=\"Enable cURL mode to populate fields from a cURL command.\",\n real_time_refresh=True,\n ),\n DataInput(\n name=\"query_params\",\n display_name=\"Query Parameters\",\n info=\"The query parameters to append to the URL.\",\n advanced=True,\n ),\n TableInput(\n name=\"body\",\n display_name=\"Body\",\n info=\"The body to send with the request as a dictionary (for POST, PATCH, PUT).\",\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Parameter name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"description\": \"Parameter value\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n advanced=True,\n real_time_refresh=True,\n ),\n TableInput(\n name=\"headers\",\n display_name=\"Headers\",\n info=\"The headers to send with the request\",\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Header\",\n \"type\": \"str\",\n \"description\": \"Header name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Header value\",\n },\n ],\n value=[{\"key\": \"User-Agent\", \"value\": get_settings_service().settings.user_agent}],\n advanced=True,\n input_types=[\"Data\"],\n real_time_refresh=True,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n value=30,\n info=\"The timeout to use for the request.\",\n advanced=True,\n ),\n BoolInput(\n name=\"follow_redirects\",\n display_name=\"Follow Redirects\",\n value=True,\n info=\"Whether to follow http redirects.\",\n advanced=True,\n ),\n BoolInput(\n 
name=\"save_to_file\",\n display_name=\"Save to File\",\n value=False,\n info=\"Save the API response to a temporary file\",\n advanced=True,\n ),\n BoolInput(\n name=\"include_httpx_metadata\",\n display_name=\"Include HTTPx Metadata\",\n value=False,\n info=(\n \"Include properties such as headers, status_code, response_headers, \"\n \"and redirection_history in the output.\"\n ),\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"API Response\", name=\"data\", method=\"make_api_request\"),\n ]\n\n def _parse_json_value(self, value: Any) -> Any:\n \"\"\"Parse a value that might be a JSON string.\"\"\"\n if not isinstance(value, str):\n return value\n\n try:\n parsed = json.loads(value)\n except json.JSONDecodeError:\n return value\n else:\n return parsed\n\n def _process_body(self, body: Any) -> dict:\n \"\"\"Process the body input into a valid dictionary.\"\"\"\n if body is None:\n return {}\n if isinstance(body, dict):\n return self._process_dict_body(body)\n if isinstance(body, str):\n return self._process_string_body(body)\n if isinstance(body, list):\n return self._process_list_body(body)\n return {}\n\n def _process_dict_body(self, body: dict) -> dict:\n \"\"\"Process dictionary body by parsing JSON values.\"\"\"\n return {k: self._parse_json_value(v) for k, v in body.items()}\n\n def _process_string_body(self, body: str) -> dict:\n \"\"\"Process string body by attempting JSON parse.\"\"\"\n try:\n return self._process_body(json.loads(body))\n except json.JSONDecodeError:\n return {\"data\": body}\n\n def _process_list_body(self, body: list) -> dict:\n \"\"\"Process list body by converting to key-value dictionary.\"\"\"\n processed_dict = {}\n try:\n for item in body:\n if not self._is_valid_key_value_item(item):\n continue\n key = item[\"key\"]\n value = self._parse_json_value(item[\"value\"])\n processed_dict[key] = value\n except (KeyError, TypeError, ValueError) as e:\n self.log(f\"Failed to process body list: {e}\")\n return {}\n return processed_dict\n\n def _is_valid_key_value_item(self, item: Any) -> bool:\n \"\"\"Check if an item is a valid key-value dictionary.\"\"\"\n return isinstance(item, dict) and \"key\" in item and \"value\" in item\n\n def parse_curl(self, curl: str, build_config: dotdict) -> dotdict:\n \"\"\"Parse a cURL command and update build configuration.\"\"\"\n try:\n parsed = parse_context(curl)\n\n # Update basic configuration\n url = parsed.url\n # Normalize URL before setting it\n url = self._normalize_url(url)\n\n build_config[\"url_input\"][\"value\"] = url\n build_config[\"method\"][\"value\"] = parsed.method.upper()\n\n # Process headers\n headers_list = [{\"key\": k, \"value\": v} for k, v in parsed.headers.items()]\n build_config[\"headers\"][\"value\"] = headers_list\n\n # Process body data\n if not parsed.data:\n build_config[\"body\"][\"value\"] = []\n elif parsed.data:\n try:\n json_data = json.loads(parsed.data)\n if isinstance(json_data, dict):\n body_list = [\n {\"key\": k, \"value\": json.dumps(v) if isinstance(v, dict | list) else str(v)}\n for k, v in json_data.items()\n ]\n build_config[\"body\"][\"value\"] = body_list\n else:\n build_config[\"body\"][\"value\"] = [{\"key\": \"data\", \"value\": json.dumps(json_data)}]\n except json.JSONDecodeError:\n build_config[\"body\"][\"value\"] = [{\"key\": \"data\", \"value\": parsed.data}]\n\n except Exception as exc:\n msg = f\"Error parsing curl: {exc}\"\n self.log(msg)\n raise ValueError(msg) from exc\n\n return build_config\n\n def _normalize_url(self, url: str) -> str:\n 
\"\"\"Normalize URL by adding https:// if no protocol is specified.\"\"\"\n if not url or not isinstance(url, str):\n msg = \"URL cannot be empty\"\n raise ValueError(msg)\n\n url = url.strip()\n if url.startswith((\"http://\", \"https://\")):\n return url\n return f\"https://{url}\"\n\n async def make_request(\n self,\n client: httpx.AsyncClient,\n method: str,\n url: str,\n headers: dict | None = None,\n body: Any = None,\n timeout: int = 5,\n *,\n follow_redirects: bool = True,\n save_to_file: bool = False,\n include_httpx_metadata: bool = False,\n ) -> Data:\n method = method.upper()\n if method not in {\"GET\", \"POST\", \"PATCH\", \"PUT\", \"DELETE\"}:\n msg = f\"Unsupported method: {method}\"\n raise ValueError(msg)\n\n processed_body = self._process_body(body)\n redirection_history = []\n\n try:\n # Prepare request parameters\n request_params = {\n \"method\": method,\n \"url\": url,\n \"headers\": headers,\n \"json\": processed_body,\n \"timeout\": timeout,\n \"follow_redirects\": follow_redirects,\n }\n response = await client.request(**request_params)\n\n redirection_history = [\n {\n \"url\": redirect.headers.get(\"Location\", str(redirect.url)),\n \"status_code\": redirect.status_code,\n }\n for redirect in response.history\n ]\n\n is_binary, file_path = await self._response_info(response, with_file_path=save_to_file)\n response_headers = self._headers_to_dict(response.headers)\n\n # Base metadata\n metadata = {\n \"source\": url,\n \"status_code\": response.status_code,\n \"response_headers\": response_headers,\n }\n\n if redirection_history:\n metadata[\"redirection_history\"] = redirection_history\n\n if save_to_file:\n mode = \"wb\" if is_binary else \"w\"\n encoding = response.encoding if mode == \"w\" else None\n if file_path:\n await aiofiles_os.makedirs(file_path.parent, exist_ok=True)\n if is_binary:\n async with aiofiles.open(file_path, \"wb\") as f:\n await f.write(response.content)\n await f.flush()\n else:\n async with aiofiles.open(file_path, \"w\", encoding=encoding) as f:\n await f.write(response.text)\n await f.flush()\n metadata[\"file_path\"] = str(file_path)\n\n if include_httpx_metadata:\n metadata.update({\"headers\": headers})\n return Data(data=metadata)\n\n # Handle response content\n if is_binary:\n result = response.content\n else:\n try:\n result = response.json()\n except json.JSONDecodeError:\n self.log(\"Failed to decode JSON response\")\n result = response.text.encode(\"utf-8\")\n\n metadata[\"result\"] = result\n\n if include_httpx_metadata:\n metadata.update({\"headers\": headers})\n\n return Data(data=metadata)\n except (httpx.HTTPError, httpx.RequestError, httpx.TimeoutException) as exc:\n self.log(f\"Error making request to {url}\")\n return Data(\n data={\n \"source\": url,\n \"headers\": headers,\n \"status_code\": 500,\n \"error\": str(exc),\n **({\"redirection_history\": redirection_history} if redirection_history else {}),\n },\n )\n\n def add_query_params(self, url: str, params: dict) -> str:\n \"\"\"Add query parameters to URL efficiently.\"\"\"\n if not params:\n return url\n url_parts = list(urlparse(url))\n query = dict(parse_qsl(url_parts[4]))\n query.update(params)\n url_parts[4] = urlencode(query)\n return urlunparse(url_parts)\n\n def _headers_to_dict(self, headers: httpx.Headers) -> dict[str, str]:\n \"\"\"Convert HTTP headers to a dictionary with lowercased keys.\"\"\"\n return {k.lower(): v for k, v in headers.items()}\n\n def _process_headers(self, headers: Any) -> dict:\n \"\"\"Process the headers input into a valid 
dictionary.\"\"\"\n if headers is None:\n return {}\n if isinstance(headers, dict):\n return headers\n if isinstance(headers, list):\n return {item[\"key\"]: item[\"value\"] for item in headers if self._is_valid_key_value_item(item)}\n return {}\n\n async def make_api_request(self) -> Data:\n \"\"\"Make HTTP request with optimized parameter handling.\"\"\"\n method = self.method\n url = self.url_input.strip() if isinstance(self.url_input, str) else \"\"\n headers = self.headers or {}\n body = self.body or {}\n timeout = self.timeout\n follow_redirects = self.follow_redirects\n save_to_file = self.save_to_file\n include_httpx_metadata = self.include_httpx_metadata\n\n # if self.mode == \"cURL\" and self.curl_input:\n # self._build_config = self.parse_curl(self.curl_input, dotdict())\n # # After parsing curl, get the normalized URL\n # url = self._build_config[\"url_input\"][\"value\"]\n\n # Normalize URL before validation\n url = self._normalize_url(url)\n\n # Validate URL\n if not validators.url(url):\n msg = f\"Invalid URL provided: {url}\"\n raise ValueError(msg)\n\n # Process query parameters\n if isinstance(self.query_params, str):\n query_params = dict(parse_qsl(self.query_params))\n else:\n query_params = self.query_params.data if self.query_params else {}\n\n # Process headers and body\n headers = self._process_headers(headers)\n body = self._process_body(body)\n url = self.add_query_params(url, query_params)\n\n async with httpx.AsyncClient() as client:\n result = await self.make_request(\n client,\n method,\n url,\n headers,\n body,\n timeout,\n follow_redirects=follow_redirects,\n save_to_file=save_to_file,\n include_httpx_metadata=include_httpx_metadata,\n )\n self.status = result\n return result\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None) -> dotdict:\n \"\"\"Update the build config based on the selected mode.\"\"\"\n if field_name != \"mode\":\n if field_name == \"curl_input\" and self.mode == \"cURL\" and self.curl_input:\n return self.parse_curl(self.curl_input, build_config)\n return build_config\n\n # print(f\"Current mode: {field_value}\")\n if field_value == \"cURL\":\n set_field_display(build_config, \"curl_input\", value=True)\n if build_config[\"curl_input\"][\"value\"]:\n build_config = self.parse_curl(build_config[\"curl_input\"][\"value\"], build_config)\n else:\n set_field_display(build_config, \"curl_input\", value=False)\n\n return set_current_fields(\n build_config=build_config,\n action_fields=MODE_FIELDS,\n selected_action=field_value,\n default_fields=DEFAULT_FIELDS,\n func=set_field_advanced,\n default_value=True,\n )\n\n async def _response_info(\n self, response: httpx.Response, *, with_file_path: bool = False\n ) -> tuple[bool, Path | None]:\n \"\"\"Determine the file path and whether the response content is binary.\n\n Args:\n response (Response): The HTTP response object.\n with_file_path (bool): Whether to save the response content to a file.\n\n Returns:\n Tuple[bool, Path | None]:\n A tuple containing a boolean indicating if the content is binary and the full file path (if applicable).\n \"\"\"\n content_type = response.headers.get(\"Content-Type\", \"\")\n is_binary = \"application/octet-stream\" in content_type or \"application/binary\" in content_type\n\n if not with_file_path:\n return is_binary, None\n\n component_temp_dir = Path(tempfile.gettempdir()) / self.__class__.__name__\n\n # Create directory asynchronously\n await aiofiles_os.makedirs(component_temp_dir, exist_ok=True)\n\n 
filename = None\n if \"Content-Disposition\" in response.headers:\n content_disposition = response.headers[\"Content-Disposition\"]\n filename_match = re.search(r'filename=\"(.+?)\"', content_disposition)\n if filename_match:\n extracted_filename = filename_match.group(1)\n filename = extracted_filename\n\n # Step 3: Infer file extension or use part of the request URL if no filename\n if not filename:\n # Extract the last segment of the URL path\n url_path = urlparse(str(response.request.url) if response.request else \"\").path\n base_name = Path(url_path).name # Get the last segment of the path\n if not base_name: # If the path ends with a slash or is empty\n base_name = \"response\"\n\n # Infer file extension\n content_type_to_extension = {\n \"text/plain\": \".txt\",\n \"application/json\": \".json\",\n \"image/jpeg\": \".jpg\",\n \"image/png\": \".png\",\n \"application/octet-stream\": \".bin\",\n }\n extension = content_type_to_extension.get(content_type, \".bin\" if is_binary else \".txt\")\n filename = f\"{base_name}{extension}\"\n\n # Step 4: Define the full file path\n file_path = component_temp_dir / filename\n\n # Step 5: Check if file exists asynchronously and handle accordingly\n try:\n # Try to create the file exclusively (x mode) to check existence\n async with aiofiles.open(file_path, \"x\") as _:\n pass # File created successfully, we can use this path\n except FileExistsError:\n # If file exists, append a timestamp to the filename\n timestamp = datetime.now(timezone.utc).strftime(\"%Y%m%d%H%M%S%f\")\n file_path = component_temp_dir / f\"{timestamp}-{filename}\"\n\n return is_binary, file_path\n"
+ "value": "import json\nimport re\nimport tempfile\nfrom datetime import datetime, timezone\nfrom pathlib import Path\nfrom typing import Any\nfrom urllib.parse import parse_qsl, urlencode, urlparse, urlunparse\n\nimport aiofiles\nimport aiofiles.os as aiofiles_os\nimport httpx\nimport validators\n\nfrom langflow.base.curl.parse import parse_context\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import TabInput\nfrom langflow.io import (\n BoolInput,\n DataInput,\n DropdownInput,\n IntInput,\n MessageTextInput,\n MultilineInput,\n Output,\n TableInput,\n)\nfrom langflow.schema.data import Data\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.services.deps import get_settings_service\nfrom langflow.utils.component_utils import set_current_fields, set_field_advanced, set_field_display\n\n# Define fields for each mode\nMODE_FIELDS = {\n \"URL\": [\n \"url_input\",\n \"method\",\n ],\n \"cURL\": [\"curl_input\"],\n}\n\n# Fields that should always be visible\nDEFAULT_FIELDS = [\"mode\"]\n\n\nclass APIRequestComponent(Component):\n display_name = \"API Request\"\n description = \"Make HTTP requests using URL or cURL commands.\"\n icon = \"Globe\"\n name = \"APIRequest\"\n\n inputs = [\n MessageTextInput(\n name=\"url_input\",\n display_name=\"URL\",\n info=\"Enter the URL for the request.\",\n advanced=False,\n tool_mode=True,\n ),\n MultilineInput(\n name=\"curl_input\",\n display_name=\"cURL\",\n info=(\n \"Paste a curl command to populate the fields. \"\n \"This will fill in the dictionary fields for headers and body.\"\n ),\n real_time_refresh=True,\n tool_mode=True,\n advanced=True,\n show=False,\n ),\n DropdownInput(\n name=\"method\",\n display_name=\"Method\",\n options=[\"GET\", \"POST\", \"PATCH\", \"PUT\", \"DELETE\"],\n value=\"GET\",\n info=\"The HTTP method to use.\",\n real_time_refresh=True,\n ),\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"URL\", \"cURL\"],\n value=\"URL\",\n info=\"Enable cURL mode to populate fields from a cURL command.\",\n real_time_refresh=True,\n ),\n DataInput(\n name=\"query_params\",\n display_name=\"Query Parameters\",\n info=\"The query parameters to append to the URL.\",\n advanced=True,\n ),\n TableInput(\n name=\"body\",\n display_name=\"Body\",\n info=\"The body to send with the request as a dictionary (for POST, PATCH, PUT).\",\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Parameter name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"description\": \"Parameter value\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n advanced=True,\n real_time_refresh=True,\n ),\n TableInput(\n name=\"headers\",\n display_name=\"Headers\",\n info=\"The headers to send with the request\",\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Header\",\n \"type\": \"str\",\n \"description\": \"Header name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Header value\",\n },\n ],\n value=[{\"key\": \"User-Agent\", \"value\": get_settings_service().settings.user_agent}],\n advanced=True,\n input_types=[\"Data\"],\n real_time_refresh=True,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n value=30,\n info=\"The timeout to use for the request.\",\n advanced=True,\n ),\n BoolInput(\n name=\"follow_redirects\",\n display_name=\"Follow Redirects\",\n value=True,\n info=\"Whether to follow http redirects.\",\n 
advanced=True,\n ),\n BoolInput(\n name=\"save_to_file\",\n display_name=\"Save to File\",\n value=False,\n info=\"Save the API response to a temporary file\",\n advanced=True,\n ),\n BoolInput(\n name=\"include_httpx_metadata\",\n display_name=\"Include HTTPx Metadata\",\n value=False,\n info=(\n \"Include properties such as headers, status_code, response_headers, \"\n \"and redirection_history in the output.\"\n ),\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"API Response\", name=\"data\", method=\"make_api_request\"),\n ]\n\n def _parse_json_value(self, value: Any) -> Any:\n \"\"\"Parse a value that might be a JSON string.\"\"\"\n if not isinstance(value, str):\n return value\n\n try:\n parsed = json.loads(value)\n except json.JSONDecodeError:\n return value\n else:\n return parsed\n\n def _process_body(self, body: Any) -> dict:\n \"\"\"Process the body input into a valid dictionary.\"\"\"\n if body is None:\n return {}\n if isinstance(body, dict):\n return self._process_dict_body(body)\n if isinstance(body, str):\n return self._process_string_body(body)\n if isinstance(body, list):\n return self._process_list_body(body)\n return {}\n\n def _process_dict_body(self, body: dict) -> dict:\n \"\"\"Process dictionary body by parsing JSON values.\"\"\"\n return {k: self._parse_json_value(v) for k, v in body.items()}\n\n def _process_string_body(self, body: str) -> dict:\n \"\"\"Process string body by attempting JSON parse.\"\"\"\n try:\n return self._process_body(json.loads(body))\n except json.JSONDecodeError:\n return {\"data\": body}\n\n def _process_list_body(self, body: list) -> dict:\n \"\"\"Process list body by converting to key-value dictionary.\"\"\"\n processed_dict = {}\n try:\n for item in body:\n if not self._is_valid_key_value_item(item):\n continue\n key = item[\"key\"]\n value = self._parse_json_value(item[\"value\"])\n processed_dict[key] = value\n except (KeyError, TypeError, ValueError) as e:\n self.log(f\"Failed to process body list: {e}\")\n return {}\n return processed_dict\n\n def _is_valid_key_value_item(self, item: Any) -> bool:\n \"\"\"Check if an item is a valid key-value dictionary.\"\"\"\n return isinstance(item, dict) and \"key\" in item and \"value\" in item\n\n def parse_curl(self, curl: str, build_config: dotdict) -> dotdict:\n \"\"\"Parse a cURL command and update build configuration.\"\"\"\n try:\n parsed = parse_context(curl)\n\n # Update basic configuration\n url = parsed.url\n # Normalize URL before setting it\n url = self._normalize_url(url)\n\n build_config[\"url_input\"][\"value\"] = url\n build_config[\"method\"][\"value\"] = parsed.method.upper()\n\n # Process headers\n headers_list = [{\"key\": k, \"value\": v} for k, v in parsed.headers.items()]\n build_config[\"headers\"][\"value\"] = headers_list\n\n # Process body data\n if not parsed.data:\n build_config[\"body\"][\"value\"] = []\n elif parsed.data:\n try:\n json_data = json.loads(parsed.data)\n if isinstance(json_data, dict):\n body_list = [\n {\"key\": k, \"value\": json.dumps(v) if isinstance(v, dict | list) else str(v)}\n for k, v in json_data.items()\n ]\n build_config[\"body\"][\"value\"] = body_list\n else:\n build_config[\"body\"][\"value\"] = [{\"key\": \"data\", \"value\": json.dumps(json_data)}]\n except json.JSONDecodeError:\n build_config[\"body\"][\"value\"] = [{\"key\": \"data\", \"value\": parsed.data}]\n\n except Exception as exc:\n msg = f\"Error parsing curl: {exc}\"\n self.log(msg)\n raise ValueError(msg) from exc\n\n return build_config\n\n def 
_normalize_url(self, url: str) -> str:\n \"\"\"Normalize URL by adding https:// if no protocol is specified.\"\"\"\n if not url or not isinstance(url, str):\n msg = \"URL cannot be empty\"\n raise ValueError(msg)\n\n url = url.strip()\n if url.startswith((\"http://\", \"https://\")):\n return url\n return f\"https://{url}\"\n\n async def make_request(\n self,\n client: httpx.AsyncClient,\n method: str,\n url: str,\n headers: dict | None = None,\n body: Any = None,\n timeout: int = 5,\n *,\n follow_redirects: bool = True,\n save_to_file: bool = False,\n include_httpx_metadata: bool = False,\n ) -> Data:\n method = method.upper()\n if method not in {\"GET\", \"POST\", \"PATCH\", \"PUT\", \"DELETE\"}:\n msg = f\"Unsupported method: {method}\"\n raise ValueError(msg)\n\n processed_body = self._process_body(body)\n redirection_history = []\n\n try:\n # Prepare request parameters\n request_params = {\n \"method\": method,\n \"url\": url,\n \"headers\": headers,\n \"json\": processed_body,\n \"timeout\": timeout,\n \"follow_redirects\": follow_redirects,\n }\n response = await client.request(**request_params)\n\n redirection_history = [\n {\n \"url\": redirect.headers.get(\"Location\", str(redirect.url)),\n \"status_code\": redirect.status_code,\n }\n for redirect in response.history\n ]\n\n is_binary, file_path = await self._response_info(response, with_file_path=save_to_file)\n response_headers = self._headers_to_dict(response.headers)\n\n # Base metadata\n metadata = {\n \"source\": url,\n \"status_code\": response.status_code,\n \"response_headers\": response_headers,\n }\n\n if redirection_history:\n metadata[\"redirection_history\"] = redirection_history\n\n if save_to_file:\n mode = \"wb\" if is_binary else \"w\"\n encoding = response.encoding if mode == \"w\" else None\n if file_path:\n await aiofiles_os.makedirs(file_path.parent, exist_ok=True)\n if is_binary:\n async with aiofiles.open(file_path, \"wb\") as f:\n await f.write(response.content)\n await f.flush()\n else:\n async with aiofiles.open(file_path, \"w\", encoding=encoding) as f:\n await f.write(response.text)\n await f.flush()\n metadata[\"file_path\"] = str(file_path)\n\n if include_httpx_metadata:\n metadata.update({\"headers\": headers})\n return Data(data=metadata)\n\n # Handle response content\n if is_binary:\n result = response.content\n else:\n try:\n result = response.json()\n except json.JSONDecodeError:\n self.log(\"Failed to decode JSON response\")\n result = response.text.encode(\"utf-8\")\n\n metadata[\"result\"] = result\n\n if include_httpx_metadata:\n metadata.update({\"headers\": headers})\n\n return Data(data=metadata)\n except (httpx.HTTPError, httpx.RequestError, httpx.TimeoutException) as exc:\n self.log(f\"Error making request to {url}\")\n return Data(\n data={\n \"source\": url,\n \"headers\": headers,\n \"status_code\": 500,\n \"error\": str(exc),\n **({\"redirection_history\": redirection_history} if redirection_history else {}),\n },\n )\n\n def add_query_params(self, url: str, params: dict) -> str:\n \"\"\"Add query parameters to URL efficiently.\"\"\"\n if not params:\n return url\n url_parts = list(urlparse(url))\n query = dict(parse_qsl(url_parts[4]))\n query.update(params)\n url_parts[4] = urlencode(query)\n return urlunparse(url_parts)\n\n def _headers_to_dict(self, headers: httpx.Headers) -> dict[str, str]:\n \"\"\"Convert HTTP headers to a dictionary with lowercased keys.\"\"\"\n return {k.lower(): v for k, v in headers.items()}\n\n def _process_headers(self, headers: Any) -> dict:\n 
\"\"\"Process the headers input into a valid dictionary.\"\"\"\n if headers is None:\n return {}\n if isinstance(headers, dict):\n return headers\n if isinstance(headers, list):\n return {item[\"key\"]: item[\"value\"] for item in headers if self._is_valid_key_value_item(item)}\n return {}\n\n async def make_api_request(self) -> Data:\n \"\"\"Make HTTP request with optimized parameter handling.\"\"\"\n method = self.method\n url = self.url_input.strip() if isinstance(self.url_input, str) else \"\"\n headers = self.headers or {}\n body = self.body or {}\n timeout = self.timeout\n follow_redirects = self.follow_redirects\n save_to_file = self.save_to_file\n include_httpx_metadata = self.include_httpx_metadata\n\n # if self.mode == \"cURL\" and self.curl_input:\n # self._build_config = self.parse_curl(self.curl_input, dotdict())\n # # After parsing curl, get the normalized URL\n # url = self._build_config[\"url_input\"][\"value\"]\n\n # Normalize URL before validation\n url = self._normalize_url(url)\n\n # Validate URL\n if not validators.url(url):\n msg = f\"Invalid URL provided: {url}\"\n raise ValueError(msg)\n\n # Process query parameters\n if isinstance(self.query_params, str):\n query_params = dict(parse_qsl(self.query_params))\n else:\n query_params = self.query_params.data if self.query_params else {}\n\n # Process headers and body\n headers = self._process_headers(headers)\n body = self._process_body(body)\n url = self.add_query_params(url, query_params)\n\n async with httpx.AsyncClient() as client:\n result = await self.make_request(\n client,\n method,\n url,\n headers,\n body,\n timeout,\n follow_redirects=follow_redirects,\n save_to_file=save_to_file,\n include_httpx_metadata=include_httpx_metadata,\n )\n self.status = result\n return result\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None) -> dotdict:\n \"\"\"Update the build config based on the selected mode.\"\"\"\n if field_name != \"mode\":\n if field_name == \"curl_input\" and self.mode == \"cURL\" and self.curl_input:\n return self.parse_curl(self.curl_input, build_config)\n return build_config\n\n # print(f\"Current mode: {field_value}\")\n if field_value == \"cURL\":\n set_field_display(build_config, \"curl_input\", value=True)\n if build_config[\"curl_input\"][\"value\"]:\n build_config = self.parse_curl(build_config[\"curl_input\"][\"value\"], build_config)\n else:\n set_field_display(build_config, \"curl_input\", value=False)\n\n return set_current_fields(\n build_config=build_config,\n action_fields=MODE_FIELDS,\n selected_action=field_value,\n default_fields=DEFAULT_FIELDS,\n func=set_field_advanced,\n default_value=True,\n )\n\n async def _response_info(\n self, response: httpx.Response, *, with_file_path: bool = False\n ) -> tuple[bool, Path | None]:\n \"\"\"Determine the file path and whether the response content is binary.\n\n Args:\n response (Response): The HTTP response object.\n with_file_path (bool): Whether to save the response content to a file.\n\n Returns:\n Tuple[bool, Path | None]:\n A tuple containing a boolean indicating if the content is binary and the full file path (if applicable).\n \"\"\"\n content_type = response.headers.get(\"Content-Type\", \"\")\n is_binary = \"application/octet-stream\" in content_type or \"application/binary\" in content_type\n\n if not with_file_path:\n return is_binary, None\n\n component_temp_dir = Path(tempfile.gettempdir()) / self.__class__.__name__\n\n # Create directory asynchronously\n await 
aiofiles_os.makedirs(component_temp_dir, exist_ok=True)\n\n filename = None\n if \"Content-Disposition\" in response.headers:\n content_disposition = response.headers[\"Content-Disposition\"]\n filename_match = re.search(r'filename=\"(.+?)\"', content_disposition)\n if filename_match:\n extracted_filename = filename_match.group(1)\n filename = extracted_filename\n\n # Step 3: Infer file extension or use part of the request URL if no filename\n if not filename:\n # Extract the last segment of the URL path\n url_path = urlparse(str(response.request.url) if response.request else \"\").path\n base_name = Path(url_path).name # Get the last segment of the path\n if not base_name: # If the path ends with a slash or is empty\n base_name = \"response\"\n\n # Infer file extension\n content_type_to_extension = {\n \"text/plain\": \".txt\",\n \"application/json\": \".json\",\n \"image/jpeg\": \".jpg\",\n \"image/png\": \".png\",\n \"application/octet-stream\": \".bin\",\n }\n extension = content_type_to_extension.get(content_type, \".bin\" if is_binary else \".txt\")\n filename = f\"{base_name}{extension}\"\n\n # Step 4: Define the full file path\n file_path = component_temp_dir / filename\n\n # Step 5: Check if file exists asynchronously and handle accordingly\n try:\n # Try to create the file exclusively (x mode) to check existence\n async with aiofiles.open(file_path, \"x\") as _:\n pass # File created successfully, we can use this path\n except FileExistsError:\n # If file exists, append a timestamp to the filename\n timestamp = datetime.now(timezone.utc).strftime(\"%Y%m%d%H%M%S%f\")\n file_path = component_temp_dir / f\"{timestamp}-{filename}\"\n\n return is_binary, file_path\n"
},
"curl_input": {
"_input_type": "MultilineInput",
@@ -1468,7 +1468,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
+ "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers.current_date import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
},
"handle_parsing_errors": {
"_input_type": "BoolInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Portfolio Website Code Generator.json b/src/backend/base/langflow/initial_setup/starter_projects/Portfolio Website Code Generator.json
index a2a1fc6d1..5544fae0a 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Portfolio Website Code Generator.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Portfolio Website Code Generator.json
@@ -1128,7 +1128,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
@@ -1529,7 +1529,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from pydantic import BaseModel, Field, create_model\nfrom trustcall import create_extractor\n\nfrom langflow.base.models.chat_result import get_chat_result\nfrom langflow.custom import Component\nfrom langflow.helpers.base_model import build_model_from_schema\nfrom langflow.io import (\n BoolInput,\n HandleInput,\n MessageTextInput,\n MultilineInput,\n Output,\n TableInput,\n)\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.table import EditMode\n\n\nclass StructuredOutputComponent(Component):\n display_name = \"Structured Output\"\n description = (\n \"Transforms LLM responses into **structured data formats**. Ideal for extracting specific information \"\n \"or creating consistent outputs.\"\n )\n name = \"StructuredOutput\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"llm\",\n display_name=\"Language Model\",\n info=\"The language model to use to generate the structured output.\",\n input_types=[\"LanguageModel\"],\n required=True,\n ),\n MessageTextInput(\n name=\"input_value\",\n display_name=\"Input Message\",\n info=\"The input message to the language model.\",\n tool_mode=True,\n required=True,\n ),\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Format Instructions\",\n info=\"The instructions to the language model for formatting the output.\",\n value=(\n \"You are an AI system designed to extract structured information from unstructured text.\"\n \"Given the input_text, return a JSON object with predefined keys based on the expected structure.\"\n \"Extract values accurately and format them according to the specified type \"\n \"(e.g., string, integer, float, date).\"\n \"If a value is missing or cannot be determined, return a default \"\n \"(e.g., null, 0, or 'N/A').\"\n \"If multiple instances of the expected structure exist within the input_text, \"\n \"stream each as a separate JSON object.\"\n ),\n required=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"schema_name\",\n display_name=\"Schema Name\",\n info=\"Provide a name for the output data schema.\",\n advanced=True,\n ),\n TableInput(\n name=\"output_schema\",\n display_name=\"Output Schema\",\n info=\"Define the structure and data types for the model's output.\",\n required=True,\n # TODO: remove deault value\n table_schema=[\n {\n \"name\": \"name\",\n \"display_name\": \"Name\",\n \"type\": \"str\",\n \"description\": \"Specify the name of the output field.\",\n \"default\": \"field\",\n \"edit_mode\": EditMode.INLINE,\n },\n {\n \"name\": \"description\",\n \"display_name\": \"Description\",\n \"type\": \"str\",\n \"description\": \"Describe the purpose of the output field.\",\n \"default\": \"description of field\",\n \"edit_mode\": EditMode.POPOVER,\n },\n {\n \"name\": \"type\",\n \"display_name\": \"Type\",\n \"type\": \"str\",\n \"edit_mode\": EditMode.INLINE,\n \"description\": (\"Indicate the data type of the output field (e.g., str, int, float, bool, dict).\"),\n \"options\": [\"str\", \"int\", \"float\", \"bool\", \"dict\"],\n \"default\": \"str\",\n },\n ],\n value=[\n {\n \"name\": \"field\",\n \"description\": \"description of field\",\n \"type\": \"str\",\n \"multiple\": \"False\",\n }\n ],\n ),\n BoolInput(\n name=\"multiple\",\n advanced=True,\n display_name=\"Generate Multiple\",\n info=\"[Deprecated] Always set to True\",\n value=True,\n ),\n ]\n\n outputs = [\n Output(\n name=\"structured_output\",\n display_name=\"Structured Output\",\n method=\"build_structured_output\",\n ),\n Output(\n 
name=\"structured_output_dataframe\",\n display_name=\"DataFrame\",\n method=\"as_dataframe\",\n ),\n ]\n\n def build_structured_output_base(self) -> Data:\n schema_name = self.schema_name or \"OutputModel\"\n\n if not hasattr(self.llm, \"with_structured_output\"):\n msg = \"Language model does not support structured output.\"\n raise TypeError(msg)\n if not self.output_schema:\n msg = \"Output schema cannot be empty\"\n raise ValueError(msg)\n\n output_model_ = build_model_from_schema(self.output_schema)\n\n output_model = create_model(\n schema_name,\n __doc__=f\"A list of {schema_name}.\",\n objects=(list[output_model_], Field(description=f\"A list of {schema_name}.\")), # type: ignore[valid-type]\n )\n\n try:\n llm_with_structured_output = create_extractor(self.llm, tools=[output_model])\n except NotImplementedError as exc:\n msg = f\"{self.llm.__class__.__name__} does not support structured output.\"\n raise TypeError(msg) from exc\n config_dict = {\n \"run_name\": self.display_name,\n \"project_name\": self.get_project_name(),\n \"callbacks\": self.get_langchain_callbacks(),\n }\n result = get_chat_result(\n runnable=llm_with_structured_output,\n system_message=self.system_prompt,\n input_value=self.input_value,\n config=config_dict,\n )\n if isinstance(result, BaseModel):\n result = result.model_dump()\n if responses := result.get(\"responses\"):\n result = responses[0].model_dump()\n if result and \"objects\" in result:\n return result[\"objects\"]\n\n return result\n\n def build_structured_output(self) -> Data:\n output = self.build_structured_output_base()\n\n return Data(text_key=\"results\", data={\"results\": output})\n\n def as_dataframe(self) -> DataFrame:\n output = self.build_structured_output_base()\n if isinstance(output, list):\n return DataFrame(data=output)\n return DataFrame(data=[output])\n"
+ "value": "from pydantic import BaseModel, Field, create_model\nfrom trustcall import create_extractor\n\nfrom langflow.base.models.chat_result import get_chat_result\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.helpers.base_model import build_model_from_schema\nfrom langflow.io import (\n BoolInput,\n HandleInput,\n MessageTextInput,\n MultilineInput,\n Output,\n TableInput,\n)\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.table import EditMode\n\n\nclass StructuredOutputComponent(Component):\n display_name = \"Structured Output\"\n description = (\n \"Transforms LLM responses into **structured data formats**. Ideal for extracting specific information \"\n \"or creating consistent outputs.\"\n )\n name = \"StructuredOutput\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"llm\",\n display_name=\"Language Model\",\n info=\"The language model to use to generate the structured output.\",\n input_types=[\"LanguageModel\"],\n required=True,\n ),\n MessageTextInput(\n name=\"input_value\",\n display_name=\"Input Message\",\n info=\"The input message to the language model.\",\n tool_mode=True,\n required=True,\n ),\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Format Instructions\",\n info=\"The instructions to the language model for formatting the output.\",\n value=(\n \"You are an AI system designed to extract structured information from unstructured text.\"\n \"Given the input_text, return a JSON object with predefined keys based on the expected structure.\"\n \"Extract values accurately and format them according to the specified type \"\n \"(e.g., string, integer, float, date).\"\n \"If a value is missing or cannot be determined, return a default \"\n \"(e.g., null, 0, or 'N/A').\"\n \"If multiple instances of the expected structure exist within the input_text, \"\n \"stream each as a separate JSON object.\"\n ),\n required=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"schema_name\",\n display_name=\"Schema Name\",\n info=\"Provide a name for the output data schema.\",\n advanced=True,\n ),\n TableInput(\n name=\"output_schema\",\n display_name=\"Output Schema\",\n info=\"Define the structure and data types for the model's output.\",\n required=True,\n # TODO: remove deault value\n table_schema=[\n {\n \"name\": \"name\",\n \"display_name\": \"Name\",\n \"type\": \"str\",\n \"description\": \"Specify the name of the output field.\",\n \"default\": \"field\",\n \"edit_mode\": EditMode.INLINE,\n },\n {\n \"name\": \"description\",\n \"display_name\": \"Description\",\n \"type\": \"str\",\n \"description\": \"Describe the purpose of the output field.\",\n \"default\": \"description of field\",\n \"edit_mode\": EditMode.POPOVER,\n },\n {\n \"name\": \"type\",\n \"display_name\": \"Type\",\n \"type\": \"str\",\n \"edit_mode\": EditMode.INLINE,\n \"description\": (\"Indicate the data type of the output field (e.g., str, int, float, bool, dict).\"),\n \"options\": [\"str\", \"int\", \"float\", \"bool\", \"dict\"],\n \"default\": \"str\",\n },\n ],\n value=[\n {\n \"name\": \"field\",\n \"description\": \"description of field\",\n \"type\": \"str\",\n \"multiple\": \"False\",\n }\n ],\n ),\n BoolInput(\n name=\"multiple\",\n advanced=True,\n display_name=\"Generate Multiple\",\n info=\"[Deprecated] Always set to True\",\n value=True,\n ),\n ]\n\n outputs = [\n Output(\n name=\"structured_output\",\n display_name=\"Structured Output\",\n 
method=\"build_structured_output\",\n ),\n Output(\n name=\"structured_output_dataframe\",\n display_name=\"DataFrame\",\n method=\"as_dataframe\",\n ),\n ]\n\n def build_structured_output_base(self) -> Data:\n schema_name = self.schema_name or \"OutputModel\"\n\n if not hasattr(self.llm, \"with_structured_output\"):\n msg = \"Language model does not support structured output.\"\n raise TypeError(msg)\n if not self.output_schema:\n msg = \"Output schema cannot be empty\"\n raise ValueError(msg)\n\n output_model_ = build_model_from_schema(self.output_schema)\n\n output_model = create_model(\n schema_name,\n __doc__=f\"A list of {schema_name}.\",\n objects=(list[output_model_], Field(description=f\"A list of {schema_name}.\")), # type: ignore[valid-type]\n )\n\n try:\n llm_with_structured_output = create_extractor(self.llm, tools=[output_model])\n except NotImplementedError as exc:\n msg = f\"{self.llm.__class__.__name__} does not support structured output.\"\n raise TypeError(msg) from exc\n config_dict = {\n \"run_name\": self.display_name,\n \"project_name\": self.get_project_name(),\n \"callbacks\": self.get_langchain_callbacks(),\n }\n result = get_chat_result(\n runnable=llm_with_structured_output,\n system_message=self.system_prompt,\n input_value=self.input_value,\n config=config_dict,\n )\n if isinstance(result, BaseModel):\n result = result.model_dump()\n if responses := result.get(\"responses\"):\n result = responses[0].model_dump()\n if result and \"objects\" in result:\n return result[\"objects\"]\n\n return result\n\n def build_structured_output(self) -> Data:\n output = self.build_structured_output_base()\n\n return Data(text_key=\"results\", data={\"results\": output})\n\n def as_dataframe(self) -> DataFrame:\n output = self.build_structured_output_base()\n if isinstance(output, list):\n return DataFrame(data=output)\n return DataFrame(data=[output])\n"
},
"input_value": {
"_input_type": "MessageTextInput",
@@ -1888,7 +1888,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data import BaseFileComponent\nfrom langflow.base.data.utils import TEXT_FILE_TYPES, parallel_load_data, parse_text_file_to_data\nfrom langflow.io import BoolInput, IntInput\nfrom langflow.schema import Data\n\n\nclass FileComponent(BaseFileComponent):\n \"\"\"Handles loading and processing of individual or zipped text files.\n\n This component supports processing multiple valid files within a zip archive,\n resolving paths, validating file types, and optionally using multithreading for processing.\n \"\"\"\n\n display_name = \"File\"\n description = \"Loads content from one or more files as a DataFrame.\"\n icon = \"file-text\"\n name = \"File\"\n\n VALID_EXTENSIONS = TEXT_FILE_TYPES\n\n inputs = [\n *BaseFileComponent._base_inputs,\n BoolInput(\n name=\"use_multithreading\",\n display_name=\"[Deprecated] Use Multithreading\",\n advanced=True,\n value=True,\n info=\"Set 'Processing Concurrency' greater than 1 to enable multithreading.\",\n ),\n IntInput(\n name=\"concurrency_multithreading\",\n display_name=\"Processing Concurrency\",\n advanced=True,\n info=\"When multiple files are being processed, the number of files to process concurrently.\",\n value=1,\n ),\n ]\n\n outputs = [\n *BaseFileComponent._base_outputs,\n ]\n\n def process_files(self, file_list: list[BaseFileComponent.BaseFile]) -> list[BaseFileComponent.BaseFile]:\n \"\"\"Processes files either sequentially or in parallel, depending on concurrency settings.\n\n Args:\n file_list (list[BaseFileComponent.BaseFile]): List of files to process.\n\n Returns:\n list[BaseFileComponent.BaseFile]: Updated list of files with merged data.\n \"\"\"\n\n def process_file(file_path: str, *, silent_errors: bool = False) -> Data | None:\n \"\"\"Processes a single file and returns its Data object.\"\"\"\n try:\n return parse_text_file_to_data(file_path, silent_errors=silent_errors)\n except FileNotFoundError as e:\n msg = f\"File not found: {file_path}. Error: {e}\"\n self.log(msg)\n if not silent_errors:\n raise\n return None\n except Exception as e:\n msg = f\"Unexpected error processing {file_path}: {e}\"\n self.log(msg)\n if not silent_errors:\n raise\n return None\n\n if not file_list:\n msg = \"No files to process.\"\n raise ValueError(msg)\n\n concurrency = 1 if not self.use_multithreading else max(1, self.concurrency_multithreading)\n file_count = len(file_list)\n\n parallel_processing_threshold = 2\n if concurrency < parallel_processing_threshold or file_count < parallel_processing_threshold:\n if file_count > 1:\n self.log(f\"Processing {file_count} files sequentially.\")\n processed_data = [process_file(str(file.path), silent_errors=self.silent_errors) for file in file_list]\n else:\n self.log(f\"Starting parallel processing of {file_count} files with concurrency: {concurrency}.\")\n file_paths = [str(file.path) for file in file_list]\n processed_data = parallel_load_data(\n file_paths,\n silent_errors=self.silent_errors,\n load_function=process_file,\n max_concurrency=concurrency,\n )\n\n # Use rollup_basefile_data to merge processed data with BaseFile objects\n return self.rollup_data(file_list, processed_data)\n"
+ "value": "from langflow.base.data.base_file import BaseFileComponent\nfrom langflow.base.data.utils import TEXT_FILE_TYPES, parallel_load_data, parse_text_file_to_data\nfrom langflow.io import BoolInput, IntInput\nfrom langflow.schema.data import Data\n\n\nclass FileComponent(BaseFileComponent):\n \"\"\"Handles loading and processing of individual or zipped text files.\n\n This component supports processing multiple valid files within a zip archive,\n resolving paths, validating file types, and optionally using multithreading for processing.\n \"\"\"\n\n display_name = \"File\"\n description = \"Loads content from one or more files as a DataFrame.\"\n icon = \"file-text\"\n name = \"File\"\n\n VALID_EXTENSIONS = TEXT_FILE_TYPES\n\n inputs = [\n *BaseFileComponent._base_inputs,\n BoolInput(\n name=\"use_multithreading\",\n display_name=\"[Deprecated] Use Multithreading\",\n advanced=True,\n value=True,\n info=\"Set 'Processing Concurrency' greater than 1 to enable multithreading.\",\n ),\n IntInput(\n name=\"concurrency_multithreading\",\n display_name=\"Processing Concurrency\",\n advanced=True,\n info=\"When multiple files are being processed, the number of files to process concurrently.\",\n value=1,\n ),\n ]\n\n outputs = [\n *BaseFileComponent._base_outputs,\n ]\n\n def process_files(self, file_list: list[BaseFileComponent.BaseFile]) -> list[BaseFileComponent.BaseFile]:\n \"\"\"Processes files either sequentially or in parallel, depending on concurrency settings.\n\n Args:\n file_list (list[BaseFileComponent.BaseFile]): List of files to process.\n\n Returns:\n list[BaseFileComponent.BaseFile]: Updated list of files with merged data.\n \"\"\"\n\n def process_file(file_path: str, *, silent_errors: bool = False) -> Data | None:\n \"\"\"Processes a single file and returns its Data object.\"\"\"\n try:\n return parse_text_file_to_data(file_path, silent_errors=silent_errors)\n except FileNotFoundError as e:\n msg = f\"File not found: {file_path}. Error: {e}\"\n self.log(msg)\n if not silent_errors:\n raise\n return None\n except Exception as e:\n msg = f\"Unexpected error processing {file_path}: {e}\"\n self.log(msg)\n if not silent_errors:\n raise\n return None\n\n if not file_list:\n msg = \"No files to process.\"\n raise ValueError(msg)\n\n concurrency = 1 if not self.use_multithreading else max(1, self.concurrency_multithreading)\n file_count = len(file_list)\n\n parallel_processing_threshold = 2\n if concurrency < parallel_processing_threshold or file_count < parallel_processing_threshold:\n if file_count > 1:\n self.log(f\"Processing {file_count} files sequentially.\")\n processed_data = [process_file(str(file.path), silent_errors=self.silent_errors) for file in file_list]\n else:\n self.log(f\"Starting parallel processing of {file_count} files with concurrency: {concurrency}.\")\n file_paths = [str(file.path) for file in file_list]\n processed_data = parallel_load_data(\n file_paths,\n silent_errors=self.silent_errors,\n load_function=process_file,\n max_concurrency=concurrency,\n )\n\n # Use rollup_basefile_data to merge processed data with BaseFile objects\n return self.rollup_data(file_list, processed_data)\n"
},
"concurrency_multithreading": {
"_input_type": "IntInput",
@@ -2166,7 +2166,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.custom import Component\nfrom langflow.helpers.data import safe_convert\nfrom langflow.io import (\n BoolInput,\n HandleInput,\n MessageTextInput,\n MultilineInput,\n Output,\n TabInput,\n)\nfrom langflow.schema import Data, DataFrame\nfrom langflow.schema.message import Message\n\n\nclass ParserComponent(Component):\n display_name = \"Parser\"\n description = \"Extracts text using a template.\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"input_data\",\n display_name=\"Data or DataFrame\",\n input_types=[\"DataFrame\", \"Data\"],\n info=\"Accepts either a DataFrame or a Data object.\",\n required=True,\n ),\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Parser\", \"Stringify\"],\n value=\"Parser\",\n info=\"Convert into raw string instead of using a template.\",\n real_time_refresh=True,\n ),\n MultilineInput(\n name=\"pattern\",\n display_name=\"Template\",\n info=(\n \"Use variables within curly brackets to extract column values for DataFrames \"\n \"or key values for Data.\"\n \"For example: `Name: {Name}, Age: {Age}, Country: {Country}`\"\n ),\n value=\"Text: {text}\", # Example default\n dynamic=True,\n show=True,\n required=True,\n ),\n MessageTextInput(\n name=\"sep\",\n display_name=\"Separator\",\n advanced=True,\n value=\"\\n\",\n info=\"String used to separate rows/items.\",\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"Parsed Text\",\n name=\"parsed_text\",\n info=\"Formatted text output.\",\n method=\"parse_combined_text\",\n ),\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n \"\"\"Dynamically hide/show `template` and enforce requirement based on `stringify`.\"\"\"\n if field_name == \"mode\":\n build_config[\"pattern\"][\"show\"] = self.mode == \"Parser\"\n build_config[\"pattern\"][\"required\"] = self.mode == \"Parser\"\n if field_value:\n clean_data = BoolInput(\n name=\"clean_data\",\n display_name=\"Clean Data\",\n info=(\n \"Enable to clean the data by removing empty rows and lines \"\n \"in each cell of the DataFrame/ Data object.\"\n ),\n value=True,\n advanced=True,\n required=False,\n )\n build_config[\"clean_data\"] = clean_data.to_dict()\n else:\n build_config.pop(\"clean_data\", None)\n\n return build_config\n\n def _clean_args(self):\n \"\"\"Prepare arguments based on input type.\"\"\"\n input_data = self.input_data\n\n match input_data:\n case list() if all(isinstance(item, Data) for item in input_data):\n msg = \"List of Data objects is not supported.\"\n raise ValueError(msg)\n case DataFrame():\n return input_data, None\n case Data():\n return None, input_data\n case dict() if \"data\" in input_data:\n try:\n if \"columns\" in input_data: # Likely a DataFrame\n return DataFrame.from_dict(input_data), None\n # Likely a Data object\n return None, Data(**input_data)\n except (TypeError, ValueError, KeyError) as e:\n msg = f\"Invalid structured input provided: {e!s}\"\n raise ValueError(msg) from e\n case _:\n msg = f\"Unsupported input type: {type(input_data)}. 
Expected DataFrame or Data.\"\n raise ValueError(msg)\n\n def parse_combined_text(self) -> Message:\n \"\"\"Parse all rows/items into a single text or convert input to string if `stringify` is enabled.\"\"\"\n # Early return for stringify option\n if self.mode == \"Stringify\":\n return self.convert_to_string()\n\n df, data = self._clean_args()\n\n lines = []\n if df is not None:\n for _, row in df.iterrows():\n formatted_text = self.pattern.format(**row.to_dict())\n lines.append(formatted_text)\n elif data is not None:\n formatted_text = self.pattern.format(**data.data)\n lines.append(formatted_text)\n\n combined_text = self.sep.join(lines)\n self.status = combined_text\n return Message(text=combined_text)\n\n def convert_to_string(self) -> Message:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n result = \"\"\n if isinstance(self.input_data, list):\n result = \"\\n\".join([safe_convert(item, clean_data=self.clean_data or False) for item in self.input_data])\n else:\n result = safe_convert(self.input_data or False)\n self.log(f\"Converted to string with length: {len(result)}\")\n\n message = Message(text=result)\n self.status = message\n return message\n"
+ "value": "from langflow.custom.custom_component.component import Component\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, HandleInput, MessageTextInput, MultilineInput, TabInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.template.field.base import Output\n\n\nclass ParserComponent(Component):\n display_name = \"Parser\"\n description = \"Extracts text using a template.\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"input_data\",\n display_name=\"Data or DataFrame\",\n input_types=[\"DataFrame\", \"Data\"],\n info=\"Accepts either a DataFrame or a Data object.\",\n required=True,\n ),\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Parser\", \"Stringify\"],\n value=\"Parser\",\n info=\"Convert into raw string instead of using a template.\",\n real_time_refresh=True,\n ),\n MultilineInput(\n name=\"pattern\",\n display_name=\"Template\",\n info=(\n \"Use variables within curly brackets to extract column values for DataFrames \"\n \"or key values for Data.\"\n \"For example: `Name: {Name}, Age: {Age}, Country: {Country}`\"\n ),\n value=\"Text: {text}\", # Example default\n dynamic=True,\n show=True,\n required=True,\n ),\n MessageTextInput(\n name=\"sep\",\n display_name=\"Separator\",\n advanced=True,\n value=\"\\n\",\n info=\"String used to separate rows/items.\",\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"Parsed Text\",\n name=\"parsed_text\",\n info=\"Formatted text output.\",\n method=\"parse_combined_text\",\n ),\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n \"\"\"Dynamically hide/show `template` and enforce requirement based on `stringify`.\"\"\"\n if field_name == \"mode\":\n build_config[\"pattern\"][\"show\"] = self.mode == \"Parser\"\n build_config[\"pattern\"][\"required\"] = self.mode == \"Parser\"\n if field_value:\n clean_data = BoolInput(\n name=\"clean_data\",\n display_name=\"Clean Data\",\n info=(\n \"Enable to clean the data by removing empty rows and lines \"\n \"in each cell of the DataFrame/ Data object.\"\n ),\n value=True,\n advanced=True,\n required=False,\n )\n build_config[\"clean_data\"] = clean_data.to_dict()\n else:\n build_config.pop(\"clean_data\", None)\n\n return build_config\n\n def _clean_args(self):\n \"\"\"Prepare arguments based on input type.\"\"\"\n input_data = self.input_data\n\n match input_data:\n case list() if all(isinstance(item, Data) for item in input_data):\n msg = \"List of Data objects is not supported.\"\n raise ValueError(msg)\n case DataFrame():\n return input_data, None\n case Data():\n return None, input_data\n case dict() if \"data\" in input_data:\n try:\n if \"columns\" in input_data: # Likely a DataFrame\n return DataFrame.from_dict(input_data), None\n # Likely a Data object\n return None, Data(**input_data)\n except (TypeError, ValueError, KeyError) as e:\n msg = f\"Invalid structured input provided: {e!s}\"\n raise ValueError(msg) from e\n case _:\n msg = f\"Unsupported input type: {type(input_data)}. 
Expected DataFrame or Data.\"\n raise ValueError(msg)\n\n def parse_combined_text(self) -> Message:\n \"\"\"Parse all rows/items into a single text or convert input to string if `stringify` is enabled.\"\"\"\n # Early return for stringify option\n if self.mode == \"Stringify\":\n return self.convert_to_string()\n\n df, data = self._clean_args()\n\n lines = []\n if df is not None:\n for _, row in df.iterrows():\n formatted_text = self.pattern.format(**row.to_dict())\n lines.append(formatted_text)\n elif data is not None:\n formatted_text = self.pattern.format(**data.data)\n lines.append(formatted_text)\n\n combined_text = self.sep.join(lines)\n self.status = combined_text\n return Message(text=combined_text)\n\n def convert_to_string(self) -> Message:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n result = \"\"\n if isinstance(self.input_data, list):\n result = \"\\n\".join([safe_convert(item, clean_data=self.clean_data or False) for item in self.input_data])\n else:\n result = safe_convert(self.input_data or False)\n self.log(f\"Converted to string with length: {len(result)}\")\n\n message = Message(text=result)\n self.status = message\n return message\n"
},
"input_data": {
"_input_type": "HandleInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Price Deal Finder.json b/src/backend/base/langflow/initial_setup/starter_projects/Price Deal Finder.json
index 6c4314244..1cc3a73ca 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Price Deal Finder.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Price Deal Finder.json
@@ -233,7 +233,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
+ "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"_input_type": "FileInput",
@@ -563,7 +563,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
@@ -845,7 +845,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "import httpx\nfrom loguru import logger\n\nfrom langflow.custom import Component\nfrom langflow.io import BoolInput, DropdownInput, IntInput, MessageTextInput, Output, SecretStrInput\nfrom langflow.schema import Data\nfrom langflow.schema.dataframe import DataFrame\n\n\nclass TavilySearchComponent(Component):\n display_name = \"Tavily Search API\"\n description = \"\"\"**Tavily Search** is a search engine optimized for LLMs and RAG, \\\n aimed at efficient, quick, and persistent search results.\"\"\"\n icon = \"TavilyIcon\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Tavily API Key\",\n required=True,\n info=\"Your Tavily API Key.\",\n ),\n MessageTextInput(\n name=\"query\",\n display_name=\"Search Query\",\n info=\"The search query you want to execute with Tavily.\",\n tool_mode=True,\n ),\n DropdownInput(\n name=\"search_depth\",\n display_name=\"Search Depth\",\n info=\"The depth of the search.\",\n options=[\"basic\", \"advanced\"],\n value=\"advanced\",\n advanced=True,\n ),\n IntInput(\n name=\"chunks_per_source\",\n display_name=\"Chunks Per Source\",\n info=(\"The number of content chunks to retrieve from each source (1-3). Only works with advanced search.\"),\n value=3,\n advanced=True,\n ),\n DropdownInput(\n name=\"topic\",\n display_name=\"Search Topic\",\n info=\"The category of the search.\",\n options=[\"general\", \"news\"],\n value=\"general\",\n advanced=True,\n ),\n IntInput(\n name=\"days\",\n display_name=\"Days\",\n info=\"Number of days back from current date to include. Only available with news topic.\",\n value=7,\n advanced=True,\n ),\n IntInput(\n name=\"max_results\",\n display_name=\"Max Results\",\n info=\"The maximum number of search results to return.\",\n value=5,\n advanced=True,\n ),\n BoolInput(\n name=\"include_answer\",\n display_name=\"Include Answer\",\n info=\"Include a short answer to original query.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"time_range\",\n display_name=\"Time Range\",\n info=\"The time range back from the current date to filter results.\",\n options=[\"day\", \"week\", \"month\", \"year\"],\n value=None, # Default to None to make it optional\n advanced=True,\n ),\n BoolInput(\n name=\"include_images\",\n display_name=\"Include Images\",\n info=\"Include a list of query-related images in the response.\",\n value=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"include_domains\",\n display_name=\"Include Domains\",\n info=\"Comma-separated list of domains to include in the search results.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"exclude_domains\",\n display_name=\"Exclude Domains\",\n info=\"Comma-separated list of domains to exclude from the search results.\",\n advanced=True,\n ),\n BoolInput(\n name=\"include_raw_content\",\n display_name=\"Include Raw Content\",\n info=\"Include the cleaned and parsed HTML content of each search result.\",\n value=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"fetch_content_dataframe\"),\n ]\n\n def fetch_content(self) -> list[Data]:\n try:\n # Only process domains if they're provided\n include_domains = None\n exclude_domains = None\n\n if self.include_domains:\n include_domains = [domain.strip() for domain in self.include_domains.split(\",\") if domain.strip()]\n\n if self.exclude_domains:\n exclude_domains = [domain.strip() for domain in self.exclude_domains.split(\",\") if domain.strip()]\n\n url = \"https://api.tavily.com/search\"\n headers = 
{\n \"content-type\": \"application/json\",\n \"accept\": \"application/json\",\n }\n\n payload = {\n \"api_key\": self.api_key,\n \"query\": self.query,\n \"search_depth\": self.search_depth,\n \"topic\": self.topic,\n \"max_results\": self.max_results,\n \"include_images\": self.include_images,\n \"include_answer\": self.include_answer,\n \"include_raw_content\": self.include_raw_content,\n \"days\": self.days,\n \"time_range\": self.time_range,\n }\n\n # Only add domains to payload if they exist and have values\n if include_domains:\n payload[\"include_domains\"] = include_domains\n if exclude_domains:\n payload[\"exclude_domains\"] = exclude_domains\n\n # Add conditional parameters only if they should be included\n if self.search_depth == \"advanced\" and self.chunks_per_source:\n payload[\"chunks_per_source\"] = self.chunks_per_source\n\n if self.topic == \"news\" and self.days:\n payload[\"days\"] = int(self.days) # Ensure days is an integer\n\n # Add time_range if it's set\n if hasattr(self, \"time_range\") and self.time_range:\n payload[\"time_range\"] = self.time_range\n\n # Add timeout handling\n with httpx.Client(timeout=90.0) as client:\n response = client.post(url, json=payload, headers=headers)\n\n response.raise_for_status()\n search_results = response.json()\n\n data_results = []\n\n if self.include_answer and search_results.get(\"answer\"):\n data_results.append(Data(text=search_results[\"answer\"]))\n\n for result in search_results.get(\"results\", []):\n content = result.get(\"content\", \"\")\n result_data = {\n \"title\": result.get(\"title\"),\n \"url\": result.get(\"url\"),\n \"content\": content,\n \"score\": result.get(\"score\"),\n }\n if self.include_raw_content:\n result_data[\"raw_content\"] = result.get(\"raw_content\")\n\n data_results.append(Data(text=content, data=result_data))\n\n if self.include_images and search_results.get(\"images\"):\n data_results.append(Data(text=\"Images found\", data={\"images\": search_results[\"images\"]}))\n\n except httpx.TimeoutException:\n error_message = \"Request timed out (90s). Please try again or adjust parameters.\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except httpx.HTTPStatusError as exc:\n error_message = f\"HTTP error occurred: {exc.response.status_code} - {exc.response.text}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except httpx.RequestError as exc:\n error_message = f\"Request error occurred: {exc}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except ValueError as exc:\n error_message = f\"Invalid response format: {exc}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n else:\n self.status = data_results\n return data_results\n\n def fetch_content_dataframe(self) -> DataFrame:\n data = self.fetch_content()\n return DataFrame(data)\n"
+ "value": "import httpx\nfrom loguru import logger\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.template.field.base import Output\n\n\nclass TavilySearchComponent(Component):\n display_name = \"Tavily Search API\"\n description = \"\"\"**Tavily Search** is a search engine optimized for LLMs and RAG, \\\n aimed at efficient, quick, and persistent search results.\"\"\"\n icon = \"TavilyIcon\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Tavily API Key\",\n required=True,\n info=\"Your Tavily API Key.\",\n ),\n MessageTextInput(\n name=\"query\",\n display_name=\"Search Query\",\n info=\"The search query you want to execute with Tavily.\",\n tool_mode=True,\n ),\n DropdownInput(\n name=\"search_depth\",\n display_name=\"Search Depth\",\n info=\"The depth of the search.\",\n options=[\"basic\", \"advanced\"],\n value=\"advanced\",\n advanced=True,\n ),\n IntInput(\n name=\"chunks_per_source\",\n display_name=\"Chunks Per Source\",\n info=(\"The number of content chunks to retrieve from each source (1-3). Only works with advanced search.\"),\n value=3,\n advanced=True,\n ),\n DropdownInput(\n name=\"topic\",\n display_name=\"Search Topic\",\n info=\"The category of the search.\",\n options=[\"general\", \"news\"],\n value=\"general\",\n advanced=True,\n ),\n IntInput(\n name=\"days\",\n display_name=\"Days\",\n info=\"Number of days back from current date to include. Only available with news topic.\",\n value=7,\n advanced=True,\n ),\n IntInput(\n name=\"max_results\",\n display_name=\"Max Results\",\n info=\"The maximum number of search results to return.\",\n value=5,\n advanced=True,\n ),\n BoolInput(\n name=\"include_answer\",\n display_name=\"Include Answer\",\n info=\"Include a short answer to original query.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"time_range\",\n display_name=\"Time Range\",\n info=\"The time range back from the current date to filter results.\",\n options=[\"day\", \"week\", \"month\", \"year\"],\n value=None, # Default to None to make it optional\n advanced=True,\n ),\n BoolInput(\n name=\"include_images\",\n display_name=\"Include Images\",\n info=\"Include a list of query-related images in the response.\",\n value=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"include_domains\",\n display_name=\"Include Domains\",\n info=\"Comma-separated list of domains to include in the search results.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"exclude_domains\",\n display_name=\"Exclude Domains\",\n info=\"Comma-separated list of domains to exclude from the search results.\",\n advanced=True,\n ),\n BoolInput(\n name=\"include_raw_content\",\n display_name=\"Include Raw Content\",\n info=\"Include the cleaned and parsed HTML content of each search result.\",\n value=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"fetch_content_dataframe\"),\n ]\n\n def fetch_content(self) -> list[Data]:\n try:\n # Only process domains if they're provided\n include_domains = None\n exclude_domains = None\n\n if self.include_domains:\n include_domains = [domain.strip() for domain in self.include_domains.split(\",\") if domain.strip()]\n\n if self.exclude_domains:\n exclude_domains = [domain.strip() for domain in 
self.exclude_domains.split(\",\") if domain.strip()]\n\n url = \"https://api.tavily.com/search\"\n headers = {\n \"content-type\": \"application/json\",\n \"accept\": \"application/json\",\n }\n\n payload = {\n \"api_key\": self.api_key,\n \"query\": self.query,\n \"search_depth\": self.search_depth,\n \"topic\": self.topic,\n \"max_results\": self.max_results,\n \"include_images\": self.include_images,\n \"include_answer\": self.include_answer,\n \"include_raw_content\": self.include_raw_content,\n \"days\": self.days,\n \"time_range\": self.time_range,\n }\n\n # Only add domains to payload if they exist and have values\n if include_domains:\n payload[\"include_domains\"] = include_domains\n if exclude_domains:\n payload[\"exclude_domains\"] = exclude_domains\n\n # Add conditional parameters only if they should be included\n if self.search_depth == \"advanced\" and self.chunks_per_source:\n payload[\"chunks_per_source\"] = self.chunks_per_source\n\n if self.topic == \"news\" and self.days:\n payload[\"days\"] = int(self.days) # Ensure days is an integer\n\n # Add time_range if it's set\n if hasattr(self, \"time_range\") and self.time_range:\n payload[\"time_range\"] = self.time_range\n\n # Add timeout handling\n with httpx.Client(timeout=90.0) as client:\n response = client.post(url, json=payload, headers=headers)\n\n response.raise_for_status()\n search_results = response.json()\n\n data_results = []\n\n if self.include_answer and search_results.get(\"answer\"):\n data_results.append(Data(text=search_results[\"answer\"]))\n\n for result in search_results.get(\"results\", []):\n content = result.get(\"content\", \"\")\n result_data = {\n \"title\": result.get(\"title\"),\n \"url\": result.get(\"url\"),\n \"content\": content,\n \"score\": result.get(\"score\"),\n }\n if self.include_raw_content:\n result_data[\"raw_content\"] = result.get(\"raw_content\")\n\n data_results.append(Data(text=content, data=result_data))\n\n if self.include_images and search_results.get(\"images\"):\n data_results.append(Data(text=\"Images found\", data={\"images\": search_results[\"images\"]}))\n\n except httpx.TimeoutException:\n error_message = \"Request timed out (90s). Please try again or adjust parameters.\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except httpx.HTTPStatusError as exc:\n error_message = f\"HTTP error occurred: {exc.response.status_code} - {exc.response.text}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except httpx.RequestError as exc:\n error_message = f\"Request error occurred: {exc}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except ValueError as exc:\n error_message = f\"Invalid response format: {exc}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n else:\n self.status = data_results\n return data_results\n\n def fetch_content_dataframe(self) -> DataFrame:\n data = self.fetch_content()\n return DataFrame(data)\n"
},
"days": {
"_input_type": "IntInput",
@@ -1282,7 +1282,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "import httpx\nfrom loguru import logger\n\nfrom langflow.custom import Component\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.io import (\n BoolInput,\n DropdownInput,\n IntInput,\n MessageTextInput,\n MultilineInput,\n Output,\n SecretStrInput,\n)\nfrom langflow.schema import Data\n\n\nclass AgentQL(Component):\n display_name = \"Extract Web Data\"\n description = \"Extracts structured data from a web page using an AgentQL query or a Natural Language description.\"\n documentation: str = \"https://docs.agentql.com/rest-api/api-reference\"\n icon = \"AgentQL\"\n name = \"AgentQL\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"API Key\",\n required=True,\n password=True,\n info=\"Your AgentQL API key from dev.agentql.com\",\n ),\n MessageTextInput(\n name=\"url\",\n display_name=\"URL\",\n required=True,\n info=\"The URL of the public web page you want to extract data from.\",\n tool_mode=True,\n ),\n MultilineInput(\n name=\"query\",\n display_name=\"AgentQL Query\",\n required=False,\n info=\"The AgentQL query to execute. Learn more at https://docs.agentql.com/agentql-query or use a prompt.\",\n tool_mode=True,\n ),\n MultilineInput(\n name=\"prompt\",\n display_name=\"Prompt\",\n required=False,\n info=\"A Natural Language description of the data to extract from the page. Alternative to AgentQL query.\",\n tool_mode=True,\n ),\n BoolInput(\n name=\"is_stealth_mode_enabled\",\n display_name=\"Enable Stealth Mode (Beta)\",\n info=\"Enable experimental anti-bot evasion strategies. May not work for all websites at all times.\",\n value=False,\n advanced=True,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"Seconds to wait for a request.\",\n value=900,\n advanced=True,\n ),\n DropdownInput(\n name=\"mode\",\n display_name=\"Request Mode\",\n info=\"'standard' uses deep data analysis, while 'fast' trades some depth of analysis for speed.\",\n options=[\"fast\", \"standard\"],\n value=\"fast\",\n advanced=True,\n ),\n IntInput(\n name=\"wait_for\",\n display_name=\"Wait For\",\n info=\"Seconds to wait for the page to load before extracting data.\",\n value=0,\n range_spec=RangeSpec(min=0, max=10, step_type=\"int\"),\n advanced=True,\n ),\n BoolInput(\n name=\"is_scroll_to_bottom_enabled\",\n display_name=\"Enable scroll to bottom\",\n info=\"Scroll to bottom of the page before extracting data.\",\n value=False,\n advanced=True,\n ),\n BoolInput(\n name=\"is_screenshot_enabled\",\n display_name=\"Enable screenshot\",\n info=\"Take a screenshot before extracting data. 
Returned in 'metadata' as a Base64 string.\",\n value=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Data\", name=\"data\", method=\"build_output\"),\n ]\n\n def build_output(self) -> Data:\n endpoint = \"https://api.agentql.com/v1/query-data\"\n headers = {\n \"X-API-Key\": self.api_key,\n \"Content-Type\": \"application/json\",\n \"X-TF-Request-Origin\": \"langflow\",\n }\n\n payload = {\n \"url\": self.url,\n \"query\": self.query,\n \"prompt\": self.prompt,\n \"params\": {\n \"mode\": self.mode,\n \"wait_for\": self.wait_for,\n \"is_scroll_to_bottom_enabled\": self.is_scroll_to_bottom_enabled,\n \"is_screenshot_enabled\": self.is_screenshot_enabled,\n },\n \"metadata\": {\n \"experimental_stealth_mode_enabled\": self.is_stealth_mode_enabled,\n },\n }\n\n if not self.prompt and not self.query:\n self.status = \"Either Query or Prompt must be provided.\"\n raise ValueError(self.status)\n if self.prompt and self.query:\n self.status = \"Both Query and Prompt can't be provided at the same time.\"\n raise ValueError(self.status)\n\n try:\n response = httpx.post(endpoint, headers=headers, json=payload, timeout=self.timeout)\n response.raise_for_status()\n\n json = response.json()\n data = Data(result=json[\"data\"], metadata=json[\"metadata\"])\n\n except httpx.HTTPStatusError as e:\n response = e.response\n if response.status_code == httpx.codes.UNAUTHORIZED:\n self.status = \"Please, provide a valid API Key. You can create one at https://dev.agentql.com.\"\n else:\n try:\n error_json = response.json()\n logger.error(\n f\"Failure response: '{response.status_code} {response.reason_phrase}' with body: {error_json}\"\n )\n msg = error_json[\"error_info\"] if \"error_info\" in error_json else error_json[\"detail\"]\n except (ValueError, TypeError):\n msg = f\"HTTP {e}.\"\n self.status = msg\n raise ValueError(self.status) from e\n\n else:\n self.status = data\n return data\n"
+ "value": "import httpx\nfrom loguru import logger\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.io import (\n BoolInput,\n DropdownInput,\n IntInput,\n MessageTextInput,\n MultilineInput,\n Output,\n SecretStrInput,\n)\nfrom langflow.schema.data import Data\n\n\nclass AgentQL(Component):\n display_name = \"Extract Web Data\"\n description = \"Extracts structured data from a web page using an AgentQL query or a Natural Language description.\"\n documentation: str = \"https://docs.agentql.com/rest-api/api-reference\"\n icon = \"AgentQL\"\n name = \"AgentQL\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"API Key\",\n required=True,\n password=True,\n info=\"Your AgentQL API key from dev.agentql.com\",\n ),\n MessageTextInput(\n name=\"url\",\n display_name=\"URL\",\n required=True,\n info=\"The URL of the public web page you want to extract data from.\",\n tool_mode=True,\n ),\n MultilineInput(\n name=\"query\",\n display_name=\"AgentQL Query\",\n required=False,\n info=\"The AgentQL query to execute. Learn more at https://docs.agentql.com/agentql-query or use a prompt.\",\n tool_mode=True,\n ),\n MultilineInput(\n name=\"prompt\",\n display_name=\"Prompt\",\n required=False,\n info=\"A Natural Language description of the data to extract from the page. Alternative to AgentQL query.\",\n tool_mode=True,\n ),\n BoolInput(\n name=\"is_stealth_mode_enabled\",\n display_name=\"Enable Stealth Mode (Beta)\",\n info=\"Enable experimental anti-bot evasion strategies. May not work for all websites at all times.\",\n value=False,\n advanced=True,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"Seconds to wait for a request.\",\n value=900,\n advanced=True,\n ),\n DropdownInput(\n name=\"mode\",\n display_name=\"Request Mode\",\n info=\"'standard' uses deep data analysis, while 'fast' trades some depth of analysis for speed.\",\n options=[\"fast\", \"standard\"],\n value=\"fast\",\n advanced=True,\n ),\n IntInput(\n name=\"wait_for\",\n display_name=\"Wait For\",\n info=\"Seconds to wait for the page to load before extracting data.\",\n value=0,\n range_spec=RangeSpec(min=0, max=10, step_type=\"int\"),\n advanced=True,\n ),\n BoolInput(\n name=\"is_scroll_to_bottom_enabled\",\n display_name=\"Enable scroll to bottom\",\n info=\"Scroll to bottom of the page before extracting data.\",\n value=False,\n advanced=True,\n ),\n BoolInput(\n name=\"is_screenshot_enabled\",\n display_name=\"Enable screenshot\",\n info=\"Take a screenshot before extracting data. 
Returned in 'metadata' as a Base64 string.\",\n value=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Data\", name=\"data\", method=\"build_output\"),\n ]\n\n def build_output(self) -> Data:\n endpoint = \"https://api.agentql.com/v1/query-data\"\n headers = {\n \"X-API-Key\": self.api_key,\n \"Content-Type\": \"application/json\",\n \"X-TF-Request-Origin\": \"langflow\",\n }\n\n payload = {\n \"url\": self.url,\n \"query\": self.query,\n \"prompt\": self.prompt,\n \"params\": {\n \"mode\": self.mode,\n \"wait_for\": self.wait_for,\n \"is_scroll_to_bottom_enabled\": self.is_scroll_to_bottom_enabled,\n \"is_screenshot_enabled\": self.is_screenshot_enabled,\n },\n \"metadata\": {\n \"experimental_stealth_mode_enabled\": self.is_stealth_mode_enabled,\n },\n }\n\n if not self.prompt and not self.query:\n self.status = \"Either Query or Prompt must be provided.\"\n raise ValueError(self.status)\n if self.prompt and self.query:\n self.status = \"Both Query and Prompt can't be provided at the same time.\"\n raise ValueError(self.status)\n\n try:\n response = httpx.post(endpoint, headers=headers, json=payload, timeout=self.timeout)\n response.raise_for_status()\n\n json = response.json()\n data = Data(result=json[\"data\"], metadata=json[\"metadata\"])\n\n except httpx.HTTPStatusError as e:\n response = e.response\n if response.status_code == httpx.codes.UNAUTHORIZED:\n self.status = \"Please, provide a valid API Key. You can create one at https://dev.agentql.com.\"\n else:\n try:\n error_json = response.json()\n logger.error(\n f\"Failure response: '{response.status_code} {response.reason_phrase}' with body: {error_json}\"\n )\n msg = error_json[\"error_info\"] if \"error_info\" in error_json else error_json[\"detail\"]\n except (ValueError, TypeError):\n msg = f\"HTTP {e}.\"\n self.status = msg\n raise ValueError(self.status) from e\n\n else:\n self.status = data\n return data\n"
},
"is_screenshot_enabled": {
"_input_type": "BoolInput",
@@ -1775,7 +1775,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
+ "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers.current_date import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
},
"handle_parsing_errors": {
"_input_type": "BoolInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Research Agent.json b/src/backend/base/langflow/initial_setup/starter_projects/Research Agent.json
index cc380a6d8..ee638afea 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Research Agent.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Research Agent.json
@@ -352,7 +352,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"previous_response": {
"advanced": false,
@@ -551,7 +551,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
+ "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"_input_type": "FileInput",
@@ -803,7 +803,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"input_value": {
"advanced": false,
@@ -1123,7 +1123,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
+ "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers.current_date import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
},
"handle_parsing_errors": {
"_input_type": "BoolInput",
@@ -1681,7 +1681,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"template": {
"_input_type": "PromptInput",
@@ -1809,7 +1809,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"template": {
"_input_type": "PromptInput",
@@ -2010,7 +2010,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "import httpx\nfrom loguru import logger\n\nfrom langflow.custom import Component\nfrom langflow.io import BoolInput, DropdownInput, IntInput, MessageTextInput, Output, SecretStrInput\nfrom langflow.schema import Data\nfrom langflow.schema.dataframe import DataFrame\n\n\nclass TavilySearchComponent(Component):\n display_name = \"Tavily Search API\"\n description = \"\"\"**Tavily Search** is a search engine optimized for LLMs and RAG, \\\n aimed at efficient, quick, and persistent search results.\"\"\"\n icon = \"TavilyIcon\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Tavily API Key\",\n required=True,\n info=\"Your Tavily API Key.\",\n ),\n MessageTextInput(\n name=\"query\",\n display_name=\"Search Query\",\n info=\"The search query you want to execute with Tavily.\",\n tool_mode=True,\n ),\n DropdownInput(\n name=\"search_depth\",\n display_name=\"Search Depth\",\n info=\"The depth of the search.\",\n options=[\"basic\", \"advanced\"],\n value=\"advanced\",\n advanced=True,\n ),\n IntInput(\n name=\"chunks_per_source\",\n display_name=\"Chunks Per Source\",\n info=(\"The number of content chunks to retrieve from each source (1-3). Only works with advanced search.\"),\n value=3,\n advanced=True,\n ),\n DropdownInput(\n name=\"topic\",\n display_name=\"Search Topic\",\n info=\"The category of the search.\",\n options=[\"general\", \"news\"],\n value=\"general\",\n advanced=True,\n ),\n IntInput(\n name=\"days\",\n display_name=\"Days\",\n info=\"Number of days back from current date to include. Only available with news topic.\",\n value=7,\n advanced=True,\n ),\n IntInput(\n name=\"max_results\",\n display_name=\"Max Results\",\n info=\"The maximum number of search results to return.\",\n value=5,\n advanced=True,\n ),\n BoolInput(\n name=\"include_answer\",\n display_name=\"Include Answer\",\n info=\"Include a short answer to original query.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"time_range\",\n display_name=\"Time Range\",\n info=\"The time range back from the current date to filter results.\",\n options=[\"day\", \"week\", \"month\", \"year\"],\n value=None, # Default to None to make it optional\n advanced=True,\n ),\n BoolInput(\n name=\"include_images\",\n display_name=\"Include Images\",\n info=\"Include a list of query-related images in the response.\",\n value=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"include_domains\",\n display_name=\"Include Domains\",\n info=\"Comma-separated list of domains to include in the search results.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"exclude_domains\",\n display_name=\"Exclude Domains\",\n info=\"Comma-separated list of domains to exclude from the search results.\",\n advanced=True,\n ),\n BoolInput(\n name=\"include_raw_content\",\n display_name=\"Include Raw Content\",\n info=\"Include the cleaned and parsed HTML content of each search result.\",\n value=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"fetch_content_dataframe\"),\n ]\n\n def fetch_content(self) -> list[Data]:\n try:\n # Only process domains if they're provided\n include_domains = None\n exclude_domains = None\n\n if self.include_domains:\n include_domains = [domain.strip() for domain in self.include_domains.split(\",\") if domain.strip()]\n\n if self.exclude_domains:\n exclude_domains = [domain.strip() for domain in self.exclude_domains.split(\",\") if domain.strip()]\n\n url = \"https://api.tavily.com/search\"\n headers = 
{\n \"content-type\": \"application/json\",\n \"accept\": \"application/json\",\n }\n\n payload = {\n \"api_key\": self.api_key,\n \"query\": self.query,\n \"search_depth\": self.search_depth,\n \"topic\": self.topic,\n \"max_results\": self.max_results,\n \"include_images\": self.include_images,\n \"include_answer\": self.include_answer,\n \"include_raw_content\": self.include_raw_content,\n \"days\": self.days,\n \"time_range\": self.time_range,\n }\n\n # Only add domains to payload if they exist and have values\n if include_domains:\n payload[\"include_domains\"] = include_domains\n if exclude_domains:\n payload[\"exclude_domains\"] = exclude_domains\n\n # Add conditional parameters only if they should be included\n if self.search_depth == \"advanced\" and self.chunks_per_source:\n payload[\"chunks_per_source\"] = self.chunks_per_source\n\n if self.topic == \"news\" and self.days:\n payload[\"days\"] = int(self.days) # Ensure days is an integer\n\n # Add time_range if it's set\n if hasattr(self, \"time_range\") and self.time_range:\n payload[\"time_range\"] = self.time_range\n\n # Add timeout handling\n with httpx.Client(timeout=90.0) as client:\n response = client.post(url, json=payload, headers=headers)\n\n response.raise_for_status()\n search_results = response.json()\n\n data_results = []\n\n if self.include_answer and search_results.get(\"answer\"):\n data_results.append(Data(text=search_results[\"answer\"]))\n\n for result in search_results.get(\"results\", []):\n content = result.get(\"content\", \"\")\n result_data = {\n \"title\": result.get(\"title\"),\n \"url\": result.get(\"url\"),\n \"content\": content,\n \"score\": result.get(\"score\"),\n }\n if self.include_raw_content:\n result_data[\"raw_content\"] = result.get(\"raw_content\")\n\n data_results.append(Data(text=content, data=result_data))\n\n if self.include_images and search_results.get(\"images\"):\n data_results.append(Data(text=\"Images found\", data={\"images\": search_results[\"images\"]}))\n\n except httpx.TimeoutException:\n error_message = \"Request timed out (90s). Please try again or adjust parameters.\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except httpx.HTTPStatusError as exc:\n error_message = f\"HTTP error occurred: {exc.response.status_code} - {exc.response.text}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except httpx.RequestError as exc:\n error_message = f\"Request error occurred: {exc}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except ValueError as exc:\n error_message = f\"Invalid response format: {exc}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n else:\n self.status = data_results\n return data_results\n\n def fetch_content_dataframe(self) -> DataFrame:\n data = self.fetch_content()\n return DataFrame(data)\n"
+ "value": "import httpx\nfrom loguru import logger\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.template.field.base import Output\n\n\nclass TavilySearchComponent(Component):\n display_name = \"Tavily Search API\"\n description = \"\"\"**Tavily Search** is a search engine optimized for LLMs and RAG, \\\n aimed at efficient, quick, and persistent search results.\"\"\"\n icon = \"TavilyIcon\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Tavily API Key\",\n required=True,\n info=\"Your Tavily API Key.\",\n ),\n MessageTextInput(\n name=\"query\",\n display_name=\"Search Query\",\n info=\"The search query you want to execute with Tavily.\",\n tool_mode=True,\n ),\n DropdownInput(\n name=\"search_depth\",\n display_name=\"Search Depth\",\n info=\"The depth of the search.\",\n options=[\"basic\", \"advanced\"],\n value=\"advanced\",\n advanced=True,\n ),\n IntInput(\n name=\"chunks_per_source\",\n display_name=\"Chunks Per Source\",\n info=(\"The number of content chunks to retrieve from each source (1-3). Only works with advanced search.\"),\n value=3,\n advanced=True,\n ),\n DropdownInput(\n name=\"topic\",\n display_name=\"Search Topic\",\n info=\"The category of the search.\",\n options=[\"general\", \"news\"],\n value=\"general\",\n advanced=True,\n ),\n IntInput(\n name=\"days\",\n display_name=\"Days\",\n info=\"Number of days back from current date to include. Only available with news topic.\",\n value=7,\n advanced=True,\n ),\n IntInput(\n name=\"max_results\",\n display_name=\"Max Results\",\n info=\"The maximum number of search results to return.\",\n value=5,\n advanced=True,\n ),\n BoolInput(\n name=\"include_answer\",\n display_name=\"Include Answer\",\n info=\"Include a short answer to original query.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"time_range\",\n display_name=\"Time Range\",\n info=\"The time range back from the current date to filter results.\",\n options=[\"day\", \"week\", \"month\", \"year\"],\n value=None, # Default to None to make it optional\n advanced=True,\n ),\n BoolInput(\n name=\"include_images\",\n display_name=\"Include Images\",\n info=\"Include a list of query-related images in the response.\",\n value=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"include_domains\",\n display_name=\"Include Domains\",\n info=\"Comma-separated list of domains to include in the search results.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"exclude_domains\",\n display_name=\"Exclude Domains\",\n info=\"Comma-separated list of domains to exclude from the search results.\",\n advanced=True,\n ),\n BoolInput(\n name=\"include_raw_content\",\n display_name=\"Include Raw Content\",\n info=\"Include the cleaned and parsed HTML content of each search result.\",\n value=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"fetch_content_dataframe\"),\n ]\n\n def fetch_content(self) -> list[Data]:\n try:\n # Only process domains if they're provided\n include_domains = None\n exclude_domains = None\n\n if self.include_domains:\n include_domains = [domain.strip() for domain in self.include_domains.split(\",\") if domain.strip()]\n\n if self.exclude_domains:\n exclude_domains = [domain.strip() for domain in 
self.exclude_domains.split(\",\") if domain.strip()]\n\n url = \"https://api.tavily.com/search\"\n headers = {\n \"content-type\": \"application/json\",\n \"accept\": \"application/json\",\n }\n\n payload = {\n \"api_key\": self.api_key,\n \"query\": self.query,\n \"search_depth\": self.search_depth,\n \"topic\": self.topic,\n \"max_results\": self.max_results,\n \"include_images\": self.include_images,\n \"include_answer\": self.include_answer,\n \"include_raw_content\": self.include_raw_content,\n \"days\": self.days,\n \"time_range\": self.time_range,\n }\n\n # Only add domains to payload if they exist and have values\n if include_domains:\n payload[\"include_domains\"] = include_domains\n if exclude_domains:\n payload[\"exclude_domains\"] = exclude_domains\n\n # Add conditional parameters only if they should be included\n if self.search_depth == \"advanced\" and self.chunks_per_source:\n payload[\"chunks_per_source\"] = self.chunks_per_source\n\n if self.topic == \"news\" and self.days:\n payload[\"days\"] = int(self.days) # Ensure days is an integer\n\n # Add time_range if it's set\n if hasattr(self, \"time_range\") and self.time_range:\n payload[\"time_range\"] = self.time_range\n\n # Add timeout handling\n with httpx.Client(timeout=90.0) as client:\n response = client.post(url, json=payload, headers=headers)\n\n response.raise_for_status()\n search_results = response.json()\n\n data_results = []\n\n if self.include_answer and search_results.get(\"answer\"):\n data_results.append(Data(text=search_results[\"answer\"]))\n\n for result in search_results.get(\"results\", []):\n content = result.get(\"content\", \"\")\n result_data = {\n \"title\": result.get(\"title\"),\n \"url\": result.get(\"url\"),\n \"content\": content,\n \"score\": result.get(\"score\"),\n }\n if self.include_raw_content:\n result_data[\"raw_content\"] = result.get(\"raw_content\")\n\n data_results.append(Data(text=content, data=result_data))\n\n if self.include_images and search_results.get(\"images\"):\n data_results.append(Data(text=\"Images found\", data={\"images\": search_results[\"images\"]}))\n\n except httpx.TimeoutException:\n error_message = \"Request timed out (90s). Please try again or adjust parameters.\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except httpx.HTTPStatusError as exc:\n error_message = f\"HTTP error occurred: {exc.response.status_code} - {exc.response.text}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except httpx.RequestError as exc:\n error_message = f\"Request error occurred: {exc}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except ValueError as exc:\n error_message = f\"Invalid response format: {exc}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n else:\n self.status = data_results\n return data_results\n\n def fetch_content_dataframe(self) -> DataFrame:\n data = self.fetch_content()\n return DataFrame(data)\n"
},
"days": {
"_input_type": "IntInput",
@@ -2485,7 +2485,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
+ "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
},
"input_value": {
"_input_type": "MessageInput",
@@ -2878,7 +2878,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
+ "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
},
"input_value": {
"_input_type": "MessageInput",
@@ -3291,7 +3291,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Research Translation Loop.json b/src/backend/base/langflow/initial_setup/starter_projects/Research Translation Loop.json
index 0d35ccd11..065c08e32 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Research Translation Loop.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Research Translation Loop.json
@@ -263,7 +263,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "import urllib.request\nfrom urllib.parse import urlparse\nfrom xml.etree.ElementTree import Element\n\nfrom defusedxml.ElementTree import fromstring\n\nfrom langflow.custom import Component\nfrom langflow.io import DropdownInput, IntInput, MessageTextInput, Output\nfrom langflow.schema import Data, DataFrame\n\n\nclass ArXivComponent(Component):\n display_name = \"arXiv\"\n description = \"Search and retrieve papers from arXiv.org\"\n icon = \"arXiv\"\n\n inputs = [\n MessageTextInput(\n name=\"search_query\",\n display_name=\"Search Query\",\n info=\"The search query for arXiv papers (e.g., 'quantum computing')\",\n tool_mode=True,\n ),\n DropdownInput(\n name=\"search_type\",\n display_name=\"Search Field\",\n info=\"The field to search in\",\n options=[\"all\", \"title\", \"abstract\", \"author\", \"cat\"], # cat is for category\n value=\"all\",\n ),\n IntInput(\n name=\"max_results\",\n display_name=\"Max Results\",\n info=\"Maximum number of results to return\",\n value=10,\n ),\n ]\n\n outputs = [\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"search_papers_dataframe\"),\n ]\n\n def build_query_url(self) -> str:\n \"\"\"Build the arXiv API query URL.\"\"\"\n base_url = \"http://export.arxiv.org/api/query?\"\n\n # Build the search query\n search_query = f\"{self.search_type}:{self.search_query}\"\n\n # URL parameters\n params = {\n \"search_query\": search_query,\n \"max_results\": str(self.max_results),\n }\n\n # Convert params to URL query string\n query_string = \"&\".join([f\"{k}={urllib.parse.quote(str(v))}\" for k, v in params.items()])\n\n return base_url + query_string\n\n def parse_atom_response(self, response_text: str) -> list[dict]:\n \"\"\"Parse the Atom XML response from arXiv.\"\"\"\n # Parse XML safely using defusedxml\n root = fromstring(response_text)\n\n # Define namespace dictionary for XML parsing\n ns = {\"atom\": \"http://www.w3.org/2005/Atom\", \"arxiv\": \"http://arxiv.org/schemas/atom\"}\n\n papers = []\n # Process each entry (paper)\n for entry in root.findall(\"atom:entry\", ns):\n paper = {\n \"id\": self._get_text(entry, \"atom:id\", ns),\n \"title\": self._get_text(entry, \"atom:title\", ns),\n \"summary\": self._get_text(entry, \"atom:summary\", ns),\n \"published\": self._get_text(entry, \"atom:published\", ns),\n \"updated\": self._get_text(entry, \"atom:updated\", ns),\n \"authors\": [author.find(\"atom:name\", ns).text for author in entry.findall(\"atom:author\", ns)],\n \"arxiv_url\": self._get_link(entry, \"alternate\", ns),\n \"pdf_url\": self._get_link(entry, \"related\", ns),\n \"comment\": self._get_text(entry, \"arxiv:comment\", ns),\n \"journal_ref\": self._get_text(entry, \"arxiv:journal_ref\", ns),\n \"primary_category\": self._get_category(entry, ns),\n \"categories\": [cat.get(\"term\") for cat in entry.findall(\"atom:category\", ns)],\n }\n papers.append(paper)\n\n return papers\n\n def _get_text(self, element: Element, path: str, ns: dict) -> str | None:\n \"\"\"Safely extract text from an XML element.\"\"\"\n el = element.find(path, ns)\n return el.text.strip() if el is not None and el.text else None\n\n def _get_link(self, element: Element, rel: str, ns: dict) -> str | None:\n \"\"\"Get link URL based on relation type.\"\"\"\n for link in element.findall(\"atom:link\", ns):\n if link.get(\"rel\") == rel:\n return link.get(\"href\")\n return None\n\n def _get_category(self, element: Element, ns: dict) -> str | None:\n \"\"\"Get primary category.\"\"\"\n cat = element.find(\"arxiv:primary_category\", 
ns)\n return cat.get(\"term\") if cat is not None else None\n\n def run_model(self) -> DataFrame:\n return self.search_papers_dataframe()\n\n def search_papers(self) -> list[Data]:\n \"\"\"Search arXiv and return results.\"\"\"\n try:\n # Build the query URL\n url = self.build_query_url()\n\n # Validate URL scheme and host\n parsed_url = urlparse(url)\n if parsed_url.scheme not in {\"http\", \"https\"}:\n error_msg = f\"Invalid URL scheme: {parsed_url.scheme}\"\n raise ValueError(error_msg)\n if parsed_url.hostname != \"export.arxiv.org\":\n error_msg = f\"Invalid host: {parsed_url.hostname}\"\n raise ValueError(error_msg)\n\n # Create a custom opener that only allows http/https schemes\n class RestrictedHTTPHandler(urllib.request.HTTPHandler):\n def http_open(self, req):\n return super().http_open(req)\n\n class RestrictedHTTPSHandler(urllib.request.HTTPSHandler):\n def https_open(self, req):\n return super().https_open(req)\n\n # Build opener with restricted handlers\n opener = urllib.request.build_opener(RestrictedHTTPHandler, RestrictedHTTPSHandler)\n urllib.request.install_opener(opener)\n\n # Make the request with validated URL using restricted opener\n response = opener.open(url)\n response_text = response.read().decode(\"utf-8\")\n\n # Parse the response\n papers = self.parse_atom_response(response_text)\n\n # Convert to Data objects\n results = [Data(data=paper) for paper in papers]\n self.status = results\n except (urllib.error.URLError, ValueError) as e:\n error_data = Data(data={\"error\": f\"Request error: {e!s}\"})\n self.status = error_data\n return [error_data]\n else:\n return results\n\n def search_papers_dataframe(self) -> DataFrame:\n \"\"\"Convert the Arxiv search results to a DataFrame.\n\n Returns:\n DataFrame: A DataFrame containing the search results.\n \"\"\"\n data = self.search_papers()\n return DataFrame(data)\n"
+ "value": "import urllib.request\nfrom urllib.parse import urlparse\nfrom xml.etree.ElementTree import Element\n\nfrom defusedxml.ElementTree import fromstring\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.io import DropdownInput, IntInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\n\n\nclass ArXivComponent(Component):\n display_name = \"arXiv\"\n description = \"Search and retrieve papers from arXiv.org\"\n icon = \"arXiv\"\n\n inputs = [\n MessageTextInput(\n name=\"search_query\",\n display_name=\"Search Query\",\n info=\"The search query for arXiv papers (e.g., 'quantum computing')\",\n tool_mode=True,\n ),\n DropdownInput(\n name=\"search_type\",\n display_name=\"Search Field\",\n info=\"The field to search in\",\n options=[\"all\", \"title\", \"abstract\", \"author\", \"cat\"], # cat is for category\n value=\"all\",\n ),\n IntInput(\n name=\"max_results\",\n display_name=\"Max Results\",\n info=\"Maximum number of results to return\",\n value=10,\n ),\n ]\n\n outputs = [\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"search_papers_dataframe\"),\n ]\n\n def build_query_url(self) -> str:\n \"\"\"Build the arXiv API query URL.\"\"\"\n base_url = \"http://export.arxiv.org/api/query?\"\n\n # Build the search query\n search_query = f\"{self.search_type}:{self.search_query}\"\n\n # URL parameters\n params = {\n \"search_query\": search_query,\n \"max_results\": str(self.max_results),\n }\n\n # Convert params to URL query string\n query_string = \"&\".join([f\"{k}={urllib.parse.quote(str(v))}\" for k, v in params.items()])\n\n return base_url + query_string\n\n def parse_atom_response(self, response_text: str) -> list[dict]:\n \"\"\"Parse the Atom XML response from arXiv.\"\"\"\n # Parse XML safely using defusedxml\n root = fromstring(response_text)\n\n # Define namespace dictionary for XML parsing\n ns = {\"atom\": \"http://www.w3.org/2005/Atom\", \"arxiv\": \"http://arxiv.org/schemas/atom\"}\n\n papers = []\n # Process each entry (paper)\n for entry in root.findall(\"atom:entry\", ns):\n paper = {\n \"id\": self._get_text(entry, \"atom:id\", ns),\n \"title\": self._get_text(entry, \"atom:title\", ns),\n \"summary\": self._get_text(entry, \"atom:summary\", ns),\n \"published\": self._get_text(entry, \"atom:published\", ns),\n \"updated\": self._get_text(entry, \"atom:updated\", ns),\n \"authors\": [author.find(\"atom:name\", ns).text for author in entry.findall(\"atom:author\", ns)],\n \"arxiv_url\": self._get_link(entry, \"alternate\", ns),\n \"pdf_url\": self._get_link(entry, \"related\", ns),\n \"comment\": self._get_text(entry, \"arxiv:comment\", ns),\n \"journal_ref\": self._get_text(entry, \"arxiv:journal_ref\", ns),\n \"primary_category\": self._get_category(entry, ns),\n \"categories\": [cat.get(\"term\") for cat in entry.findall(\"atom:category\", ns)],\n }\n papers.append(paper)\n\n return papers\n\n def _get_text(self, element: Element, path: str, ns: dict) -> str | None:\n \"\"\"Safely extract text from an XML element.\"\"\"\n el = element.find(path, ns)\n return el.text.strip() if el is not None and el.text else None\n\n def _get_link(self, element: Element, rel: str, ns: dict) -> str | None:\n \"\"\"Get link URL based on relation type.\"\"\"\n for link in element.findall(\"atom:link\", ns):\n if link.get(\"rel\") == rel:\n return link.get(\"href\")\n return None\n\n def _get_category(self, element: Element, ns: dict) -> str | None:\n \"\"\"Get primary 
category.\"\"\"\n cat = element.find(\"arxiv:primary_category\", ns)\n return cat.get(\"term\") if cat is not None else None\n\n def run_model(self) -> DataFrame:\n return self.search_papers_dataframe()\n\n def search_papers(self) -> list[Data]:\n \"\"\"Search arXiv and return results.\"\"\"\n try:\n # Build the query URL\n url = self.build_query_url()\n\n # Validate URL scheme and host\n parsed_url = urlparse(url)\n if parsed_url.scheme not in {\"http\", \"https\"}:\n error_msg = f\"Invalid URL scheme: {parsed_url.scheme}\"\n raise ValueError(error_msg)\n if parsed_url.hostname != \"export.arxiv.org\":\n error_msg = f\"Invalid host: {parsed_url.hostname}\"\n raise ValueError(error_msg)\n\n # Create a custom opener that only allows http/https schemes\n class RestrictedHTTPHandler(urllib.request.HTTPHandler):\n def http_open(self, req):\n return super().http_open(req)\n\n class RestrictedHTTPSHandler(urllib.request.HTTPSHandler):\n def https_open(self, req):\n return super().https_open(req)\n\n # Build opener with restricted handlers\n opener = urllib.request.build_opener(RestrictedHTTPHandler, RestrictedHTTPSHandler)\n urllib.request.install_opener(opener)\n\n # Make the request with validated URL using restricted opener\n response = opener.open(url)\n response_text = response.read().decode(\"utf-8\")\n\n # Parse the response\n papers = self.parse_atom_response(response_text)\n\n # Convert to Data objects\n results = [Data(data=paper) for paper in papers]\n self.status = results\n except (urllib.error.URLError, ValueError) as e:\n error_data = Data(data={\"error\": f\"Request error: {e!s}\"})\n self.status = error_data\n return [error_data]\n else:\n return results\n\n def search_papers_dataframe(self) -> DataFrame:\n \"\"\"Convert the Arxiv search results to a DataFrame.\n\n Returns:\n DataFrame: A DataFrame containing the search results.\n \"\"\"\n data = self.search_papers()\n return DataFrame(data)\n"
},
"max_results": {
"_input_type": "IntInput",
@@ -755,7 +755,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from loguru import logger\n\nfrom langflow.custom import Component\nfrom langflow.io import MessageInput, Output\nfrom langflow.schema import Data\nfrom langflow.schema.message import Message\n\n\nclass MessageToDataComponent(Component):\n display_name = \"Message to Data\"\n description = \"Convert a Message object to a Data object\"\n icon = \"message-square-share\"\n beta = True\n name = \"MessagetoData\"\n legacy = True\n\n inputs = [\n MessageInput(\n name=\"message\",\n display_name=\"Message\",\n info=\"The Message object to convert to a Data object\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Data\", name=\"data\", method=\"convert_message_to_data\"),\n ]\n\n def convert_message_to_data(self) -> Data:\n if isinstance(self.message, Message):\n # Convert Message to Data\n return Data(data=self.message.data)\n\n msg = \"Error converting Message to Data: Input must be a Message object\"\n logger.opt(exception=True).debug(msg)\n self.status = msg\n return Data(data={\"error\": msg})\n"
+ "value": "from loguru import logger\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.io import MessageInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.message import Message\n\n\nclass MessageToDataComponent(Component):\n display_name = \"Message to Data\"\n description = \"Convert a Message object to a Data object\"\n icon = \"message-square-share\"\n beta = True\n name = \"MessagetoData\"\n legacy = True\n\n inputs = [\n MessageInput(\n name=\"message\",\n display_name=\"Message\",\n info=\"The Message object to convert to a Data object\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Data\", name=\"data\", method=\"convert_message_to_data\"),\n ]\n\n def convert_message_to_data(self) -> Data:\n if isinstance(self.message, Message):\n # Convert Message to Data\n return Data(data=self.message.data)\n\n msg = \"Error converting Message to Data: Input must be a Message object\"\n logger.opt(exception=True).debug(msg)\n self.status = msg\n return Data(data={\"error\": msg})\n"
},
"message": {
"_input_type": "MessageInput",
@@ -930,7 +930,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
@@ -1222,7 +1222,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
+ "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"_input_type": "FileInput",
@@ -1539,7 +1539,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.custom import Component\nfrom langflow.helpers.data import safe_convert\nfrom langflow.io import (\n BoolInput,\n HandleInput,\n MessageTextInput,\n MultilineInput,\n Output,\n TabInput,\n)\nfrom langflow.schema import Data, DataFrame\nfrom langflow.schema.message import Message\n\n\nclass ParserComponent(Component):\n display_name = \"Parser\"\n description = \"Extracts text using a template.\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"input_data\",\n display_name=\"Data or DataFrame\",\n input_types=[\"DataFrame\", \"Data\"],\n info=\"Accepts either a DataFrame or a Data object.\",\n required=True,\n ),\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Parser\", \"Stringify\"],\n value=\"Parser\",\n info=\"Convert into raw string instead of using a template.\",\n real_time_refresh=True,\n ),\n MultilineInput(\n name=\"pattern\",\n display_name=\"Template\",\n info=(\n \"Use variables within curly brackets to extract column values for DataFrames \"\n \"or key values for Data.\"\n \"For example: `Name: {Name}, Age: {Age}, Country: {Country}`\"\n ),\n value=\"Text: {text}\", # Example default\n dynamic=True,\n show=True,\n required=True,\n ),\n MessageTextInput(\n name=\"sep\",\n display_name=\"Separator\",\n advanced=True,\n value=\"\\n\",\n info=\"String used to separate rows/items.\",\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"Parsed Text\",\n name=\"parsed_text\",\n info=\"Formatted text output.\",\n method=\"parse_combined_text\",\n ),\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n \"\"\"Dynamically hide/show `template` and enforce requirement based on `stringify`.\"\"\"\n if field_name == \"mode\":\n build_config[\"pattern\"][\"show\"] = self.mode == \"Parser\"\n build_config[\"pattern\"][\"required\"] = self.mode == \"Parser\"\n if field_value:\n clean_data = BoolInput(\n name=\"clean_data\",\n display_name=\"Clean Data\",\n info=(\n \"Enable to clean the data by removing empty rows and lines \"\n \"in each cell of the DataFrame/ Data object.\"\n ),\n value=True,\n advanced=True,\n required=False,\n )\n build_config[\"clean_data\"] = clean_data.to_dict()\n else:\n build_config.pop(\"clean_data\", None)\n\n return build_config\n\n def _clean_args(self):\n \"\"\"Prepare arguments based on input type.\"\"\"\n input_data = self.input_data\n\n match input_data:\n case list() if all(isinstance(item, Data) for item in input_data):\n msg = \"List of Data objects is not supported.\"\n raise ValueError(msg)\n case DataFrame():\n return input_data, None\n case Data():\n return None, input_data\n case dict() if \"data\" in input_data:\n try:\n if \"columns\" in input_data: # Likely a DataFrame\n return DataFrame.from_dict(input_data), None\n # Likely a Data object\n return None, Data(**input_data)\n except (TypeError, ValueError, KeyError) as e:\n msg = f\"Invalid structured input provided: {e!s}\"\n raise ValueError(msg) from e\n case _:\n msg = f\"Unsupported input type: {type(input_data)}. 
Expected DataFrame or Data.\"\n raise ValueError(msg)\n\n def parse_combined_text(self) -> Message:\n \"\"\"Parse all rows/items into a single text or convert input to string if `stringify` is enabled.\"\"\"\n # Early return for stringify option\n if self.mode == \"Stringify\":\n return self.convert_to_string()\n\n df, data = self._clean_args()\n\n lines = []\n if df is not None:\n for _, row in df.iterrows():\n formatted_text = self.pattern.format(**row.to_dict())\n lines.append(formatted_text)\n elif data is not None:\n formatted_text = self.pattern.format(**data.data)\n lines.append(formatted_text)\n\n combined_text = self.sep.join(lines)\n self.status = combined_text\n return Message(text=combined_text)\n\n def convert_to_string(self) -> Message:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n result = \"\"\n if isinstance(self.input_data, list):\n result = \"\\n\".join([safe_convert(item, clean_data=self.clean_data or False) for item in self.input_data])\n else:\n result = safe_convert(self.input_data or False)\n self.log(f\"Converted to string with length: {len(result)}\")\n\n message = Message(text=result)\n self.status = message\n return message\n"
+ "value": "from langflow.custom.custom_component.component import Component\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, HandleInput, MessageTextInput, MultilineInput, TabInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.template.field.base import Output\n\n\nclass ParserComponent(Component):\n display_name = \"Parser\"\n description = \"Extracts text using a template.\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"input_data\",\n display_name=\"Data or DataFrame\",\n input_types=[\"DataFrame\", \"Data\"],\n info=\"Accepts either a DataFrame or a Data object.\",\n required=True,\n ),\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Parser\", \"Stringify\"],\n value=\"Parser\",\n info=\"Convert into raw string instead of using a template.\",\n real_time_refresh=True,\n ),\n MultilineInput(\n name=\"pattern\",\n display_name=\"Template\",\n info=(\n \"Use variables within curly brackets to extract column values for DataFrames \"\n \"or key values for Data.\"\n \"For example: `Name: {Name}, Age: {Age}, Country: {Country}`\"\n ),\n value=\"Text: {text}\", # Example default\n dynamic=True,\n show=True,\n required=True,\n ),\n MessageTextInput(\n name=\"sep\",\n display_name=\"Separator\",\n advanced=True,\n value=\"\\n\",\n info=\"String used to separate rows/items.\",\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"Parsed Text\",\n name=\"parsed_text\",\n info=\"Formatted text output.\",\n method=\"parse_combined_text\",\n ),\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n \"\"\"Dynamically hide/show `template` and enforce requirement based on `stringify`.\"\"\"\n if field_name == \"mode\":\n build_config[\"pattern\"][\"show\"] = self.mode == \"Parser\"\n build_config[\"pattern\"][\"required\"] = self.mode == \"Parser\"\n if field_value:\n clean_data = BoolInput(\n name=\"clean_data\",\n display_name=\"Clean Data\",\n info=(\n \"Enable to clean the data by removing empty rows and lines \"\n \"in each cell of the DataFrame/ Data object.\"\n ),\n value=True,\n advanced=True,\n required=False,\n )\n build_config[\"clean_data\"] = clean_data.to_dict()\n else:\n build_config.pop(\"clean_data\", None)\n\n return build_config\n\n def _clean_args(self):\n \"\"\"Prepare arguments based on input type.\"\"\"\n input_data = self.input_data\n\n match input_data:\n case list() if all(isinstance(item, Data) for item in input_data):\n msg = \"List of Data objects is not supported.\"\n raise ValueError(msg)\n case DataFrame():\n return input_data, None\n case Data():\n return None, input_data\n case dict() if \"data\" in input_data:\n try:\n if \"columns\" in input_data: # Likely a DataFrame\n return DataFrame.from_dict(input_data), None\n # Likely a Data object\n return None, Data(**input_data)\n except (TypeError, ValueError, KeyError) as e:\n msg = f\"Invalid structured input provided: {e!s}\"\n raise ValueError(msg) from e\n case _:\n msg = f\"Unsupported input type: {type(input_data)}. 
Expected DataFrame or Data.\"\n raise ValueError(msg)\n\n def parse_combined_text(self) -> Message:\n \"\"\"Parse all rows/items into a single text or convert input to string if `stringify` is enabled.\"\"\"\n # Early return for stringify option\n if self.mode == \"Stringify\":\n return self.convert_to_string()\n\n df, data = self._clean_args()\n\n lines = []\n if df is not None:\n for _, row in df.iterrows():\n formatted_text = self.pattern.format(**row.to_dict())\n lines.append(formatted_text)\n elif data is not None:\n formatted_text = self.pattern.format(**data.data)\n lines.append(formatted_text)\n\n combined_text = self.sep.join(lines)\n self.status = combined_text\n return Message(text=combined_text)\n\n def convert_to_string(self) -> Message:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n result = \"\"\n if isinstance(self.input_data, list):\n result = \"\\n\".join([safe_convert(item, clean_data=self.clean_data or False) for item in self.input_data])\n else:\n result = safe_convert(self.input_data or False)\n self.log(f\"Converted to string with length: {len(result)}\")\n\n message = Message(text=result)\n self.status = message\n return message\n"
},
"input_data": {
"_input_type": "HandleInput",
@@ -1727,7 +1727,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.custom import Component\nfrom langflow.io import HandleInput, Output\nfrom langflow.schema import Data\nfrom langflow.schema.dataframe import DataFrame\n\n\nclass LoopComponent(Component):\n display_name = \"Loop\"\n description = (\n \"Iterates over a list of Data objects, outputting one item at a time and aggregating results from loop inputs.\"\n )\n icon = \"infinity\"\n\n inputs = [\n HandleInput(\n name=\"data\",\n display_name=\"Inputs\",\n info=\"The initial list of Data objects or DataFrame to iterate over.\",\n input_types=[\"DataFrame\"],\n ),\n ]\n\n outputs = [\n Output(display_name=\"Item\", name=\"item\", method=\"item_output\", allows_loop=True, group_outputs=True),\n Output(display_name=\"Done\", name=\"done\", method=\"done_output\", group_outputs=True),\n ]\n\n def initialize_data(self) -> None:\n \"\"\"Initialize the data list, context index, and aggregated list.\"\"\"\n if self.ctx.get(f\"{self._id}_initialized\", False):\n return\n\n # Ensure data is a list of Data objects\n data_list = self._validate_data(self.data)\n\n # Store the initial data and context variables\n self.update_ctx(\n {\n f\"{self._id}_data\": data_list,\n f\"{self._id}_index\": 0,\n f\"{self._id}_aggregated\": [],\n f\"{self._id}_initialized\": True,\n }\n )\n\n def _validate_data(self, data):\n \"\"\"Validate and return a list of Data objects.\"\"\"\n if isinstance(data, DataFrame):\n return data.to_data_list()\n if isinstance(data, Data):\n return [data]\n if isinstance(data, list) and all(isinstance(item, Data) for item in data):\n return data\n msg = \"The 'data' input must be a DataFrame, a list of Data objects, or a single Data object.\"\n raise TypeError(msg)\n\n def evaluate_stop_loop(self) -> bool:\n \"\"\"Evaluate whether to stop item or done output.\"\"\"\n current_index = self.ctx.get(f\"{self._id}_index\", 0)\n data_length = len(self.ctx.get(f\"{self._id}_data\", []))\n return current_index > data_length\n\n def item_output(self) -> Data:\n \"\"\"Output the next item in the list or stop if done.\"\"\"\n self.initialize_data()\n current_item = Data(text=\"\")\n\n if self.evaluate_stop_loop():\n self.stop(\"item\")\n else:\n # Get data list and current index\n data_list, current_index = self.loop_variables()\n if current_index < len(data_list):\n # Output current item and increment index\n try:\n current_item = data_list[current_index]\n except IndexError:\n current_item = Data(text=\"\")\n self.aggregated_output()\n self.update_ctx({f\"{self._id}_index\": current_index + 1})\n\n # Now we need to update the dependencies for the next run\n self.update_dependency()\n return current_item\n\n def update_dependency(self):\n item_dependency_id = self.get_incoming_edge_by_target_param(\"item\")\n\n self.graph.run_manager.run_predecessors[self._id].append(item_dependency_id)\n\n def done_output(self) -> DataFrame:\n \"\"\"Trigger the done output when iteration is complete.\"\"\"\n self.initialize_data()\n\n if self.evaluate_stop_loop():\n self.stop(\"item\")\n self.start(\"done\")\n\n aggregated = self.ctx.get(f\"{self._id}_aggregated\", [])\n\n return DataFrame(aggregated)\n self.stop(\"done\")\n return DataFrame([])\n\n def loop_variables(self):\n \"\"\"Retrieve loop variables from context.\"\"\"\n return (\n self.ctx.get(f\"{self._id}_data\", []),\n self.ctx.get(f\"{self._id}_index\", 0),\n )\n\n def aggregated_output(self) -> list[Data]:\n \"\"\"Return the aggregated list once all items are processed.\"\"\"\n self.initialize_data()\n\n # Get data list and aggregated 
list\n data_list = self.ctx.get(f\"{self._id}_data\", [])\n aggregated = self.ctx.get(f\"{self._id}_aggregated\", [])\n loop_input = self.item\n if loop_input is not None and not isinstance(loop_input, str) and len(aggregated) <= len(data_list):\n aggregated.append(loop_input)\n self.update_ctx({f\"{self._id}_aggregated\": aggregated})\n return aggregated\n"
+ "value": "from langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.template.field.base import Output\n\n\nclass LoopComponent(Component):\n display_name = \"Loop\"\n description = (\n \"Iterates over a list of Data objects, outputting one item at a time and aggregating results from loop inputs.\"\n )\n icon = \"infinity\"\n\n inputs = [\n HandleInput(\n name=\"data\",\n display_name=\"Inputs\",\n info=\"The initial list of Data objects or DataFrame to iterate over.\",\n input_types=[\"DataFrame\"],\n ),\n ]\n\n outputs = [\n Output(display_name=\"Item\", name=\"item\", method=\"item_output\", allows_loop=True, group_outputs=True),\n Output(display_name=\"Done\", name=\"done\", method=\"done_output\", group_outputs=True),\n ]\n\n def initialize_data(self) -> None:\n \"\"\"Initialize the data list, context index, and aggregated list.\"\"\"\n if self.ctx.get(f\"{self._id}_initialized\", False):\n return\n\n # Ensure data is a list of Data objects\n data_list = self._validate_data(self.data)\n\n # Store the initial data and context variables\n self.update_ctx(\n {\n f\"{self._id}_data\": data_list,\n f\"{self._id}_index\": 0,\n f\"{self._id}_aggregated\": [],\n f\"{self._id}_initialized\": True,\n }\n )\n\n def _validate_data(self, data):\n \"\"\"Validate and return a list of Data objects.\"\"\"\n if isinstance(data, DataFrame):\n return data.to_data_list()\n if isinstance(data, Data):\n return [data]\n if isinstance(data, list) and all(isinstance(item, Data) for item in data):\n return data\n msg = \"The 'data' input must be a DataFrame, a list of Data objects, or a single Data object.\"\n raise TypeError(msg)\n\n def evaluate_stop_loop(self) -> bool:\n \"\"\"Evaluate whether to stop item or done output.\"\"\"\n current_index = self.ctx.get(f\"{self._id}_index\", 0)\n data_length = len(self.ctx.get(f\"{self._id}_data\", []))\n return current_index > data_length\n\n def item_output(self) -> Data:\n \"\"\"Output the next item in the list or stop if done.\"\"\"\n self.initialize_data()\n current_item = Data(text=\"\")\n\n if self.evaluate_stop_loop():\n self.stop(\"item\")\n else:\n # Get data list and current index\n data_list, current_index = self.loop_variables()\n if current_index < len(data_list):\n # Output current item and increment index\n try:\n current_item = data_list[current_index]\n except IndexError:\n current_item = Data(text=\"\")\n self.aggregated_output()\n self.update_ctx({f\"{self._id}_index\": current_index + 1})\n\n # Now we need to update the dependencies for the next run\n self.update_dependency()\n return current_item\n\n def update_dependency(self):\n item_dependency_id = self.get_incoming_edge_by_target_param(\"item\")\n\n self.graph.run_manager.run_predecessors[self._id].append(item_dependency_id)\n\n def done_output(self) -> DataFrame:\n \"\"\"Trigger the done output when iteration is complete.\"\"\"\n self.initialize_data()\n\n if self.evaluate_stop_loop():\n self.stop(\"item\")\n self.start(\"done\")\n\n aggregated = self.ctx.get(f\"{self._id}_aggregated\", [])\n\n return DataFrame(aggregated)\n self.stop(\"done\")\n return DataFrame([])\n\n def loop_variables(self):\n \"\"\"Retrieve loop variables from context.\"\"\"\n return (\n self.ctx.get(f\"{self._id}_data\", []),\n self.ctx.get(f\"{self._id}_index\", 0),\n )\n\n def aggregated_output(self) -> list[Data]:\n \"\"\"Return the aggregated list once all 
items are processed.\"\"\"\n self.initialize_data()\n\n # Get data list and aggregated list\n data_list = self.ctx.get(f\"{self._id}_data\", [])\n aggregated = self.ctx.get(f\"{self._id}_aggregated\", [])\n loop_input = self.item\n if loop_input is not None and not isinstance(loop_input, str) and len(aggregated) <= len(data_list):\n aggregated.append(loop_input)\n self.update_ctx({f\"{self._id}_aggregated\": aggregated})\n return aggregated\n"
},
"data": {
"_input_type": "HandleInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/SEO Keyword Generator.json b/src/backend/base/langflow/initial_setup/starter_projects/SEO Keyword Generator.json
index 8860abc20..fe4a511f5 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/SEO Keyword Generator.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/SEO Keyword Generator.json
@@ -158,7 +158,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"current_solutions": {
"advanced": false,
@@ -459,7 +459,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"template": {
"_input_type": "PromptInput",
@@ -655,7 +655,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
@@ -973,7 +973,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
+ "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
},
"input_value": {
"_input_type": "MessageInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/SaaS Pricing.json b/src/backend/base/langflow/initial_setup/starter_projects/SaaS Pricing.json
index 3a50f9b2a..976d75421 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/SaaS Pricing.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/SaaS Pricing.json
@@ -153,7 +153,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"continuous_development_cost": {
"advanced": false,
@@ -464,7 +464,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
@@ -844,7 +844,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
+ "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers.current_date import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
},
"handle_parsing_errors": {
"_input_type": "BoolInput",
@@ -1401,7 +1401,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "import ast\nimport operator\nfrom collections.abc import Callable\n\nfrom langflow.custom import Component\nfrom langflow.inputs import MessageTextInput\nfrom langflow.io import Output\nfrom langflow.schema import Data\n\n\nclass CalculatorComponent(Component):\n display_name = \"Calculator\"\n description = \"Perform basic arithmetic operations on a given expression.\"\n icon = \"calculator\"\n\n # Cache operators dictionary as a class variable\n OPERATORS: dict[type[ast.operator], Callable] = {\n ast.Add: operator.add,\n ast.Sub: operator.sub,\n ast.Mult: operator.mul,\n ast.Div: operator.truediv,\n ast.Pow: operator.pow,\n }\n\n inputs = [\n MessageTextInput(\n name=\"expression\",\n display_name=\"Expression\",\n info=\"The arithmetic expression to evaluate (e.g., '4*4*(33/22)+12-20').\",\n tool_mode=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Data\", name=\"result\", type_=Data, method=\"evaluate_expression\"),\n ]\n\n def _eval_expr(self, node: ast.AST) -> float:\n \"\"\"Evaluate an AST node recursively.\"\"\"\n if isinstance(node, ast.Constant):\n if isinstance(node.value, int | float):\n return float(node.value)\n error_msg = f\"Unsupported constant type: {type(node.value).__name__}\"\n raise TypeError(error_msg)\n if isinstance(node, ast.Num): # For backwards compatibility\n if isinstance(node.n, int | float):\n return float(node.n)\n error_msg = f\"Unsupported number type: {type(node.n).__name__}\"\n raise TypeError(error_msg)\n\n if isinstance(node, ast.BinOp):\n op_type = type(node.op)\n if op_type not in self.OPERATORS:\n error_msg = f\"Unsupported binary operator: {op_type.__name__}\"\n raise TypeError(error_msg)\n\n left = self._eval_expr(node.left)\n right = self._eval_expr(node.right)\n return self.OPERATORS[op_type](left, right)\n\n error_msg = f\"Unsupported operation or expression type: {type(node).__name__}\"\n raise TypeError(error_msg)\n\n def evaluate_expression(self) -> Data:\n \"\"\"Evaluate the mathematical expression and return the result.\"\"\"\n try:\n tree = ast.parse(self.expression, mode=\"eval\")\n result = self._eval_expr(tree.body)\n\n formatted_result = f\"{float(result):.6f}\".rstrip(\"0\").rstrip(\".\")\n self.log(f\"Calculation result: {formatted_result}\")\n\n self.status = formatted_result\n return Data(data={\"result\": formatted_result})\n\n except ZeroDivisionError:\n error_message = \"Error: Division by zero\"\n self.status = error_message\n return Data(data={\"error\": error_message, \"input\": self.expression})\n\n except (SyntaxError, TypeError, KeyError, ValueError, AttributeError, OverflowError) as e:\n error_message = f\"Invalid expression: {e!s}\"\n self.status = error_message\n return Data(data={\"error\": error_message, \"input\": self.expression})\n\n def build(self):\n \"\"\"Return the main evaluation function.\"\"\"\n return self.evaluate_expression\n"
+ "value": "import ast\nimport operator\nfrom collections.abc import Callable\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import MessageTextInput\nfrom langflow.io import Output\nfrom langflow.schema.data import Data\n\n\nclass CalculatorComponent(Component):\n display_name = \"Calculator\"\n description = \"Perform basic arithmetic operations on a given expression.\"\n icon = \"calculator\"\n\n # Cache operators dictionary as a class variable\n OPERATORS: dict[type[ast.operator], Callable] = {\n ast.Add: operator.add,\n ast.Sub: operator.sub,\n ast.Mult: operator.mul,\n ast.Div: operator.truediv,\n ast.Pow: operator.pow,\n }\n\n inputs = [\n MessageTextInput(\n name=\"expression\",\n display_name=\"Expression\",\n info=\"The arithmetic expression to evaluate (e.g., '4*4*(33/22)+12-20').\",\n tool_mode=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Data\", name=\"result\", type_=Data, method=\"evaluate_expression\"),\n ]\n\n def _eval_expr(self, node: ast.AST) -> float:\n \"\"\"Evaluate an AST node recursively.\"\"\"\n if isinstance(node, ast.Constant):\n if isinstance(node.value, int | float):\n return float(node.value)\n error_msg = f\"Unsupported constant type: {type(node.value).__name__}\"\n raise TypeError(error_msg)\n if isinstance(node, ast.Num): # For backwards compatibility\n if isinstance(node.n, int | float):\n return float(node.n)\n error_msg = f\"Unsupported number type: {type(node.n).__name__}\"\n raise TypeError(error_msg)\n\n if isinstance(node, ast.BinOp):\n op_type = type(node.op)\n if op_type not in self.OPERATORS:\n error_msg = f\"Unsupported binary operator: {op_type.__name__}\"\n raise TypeError(error_msg)\n\n left = self._eval_expr(node.left)\n right = self._eval_expr(node.right)\n return self.OPERATORS[op_type](left, right)\n\n error_msg = f\"Unsupported operation or expression type: {type(node).__name__}\"\n raise TypeError(error_msg)\n\n def evaluate_expression(self) -> Data:\n \"\"\"Evaluate the mathematical expression and return the result.\"\"\"\n try:\n tree = ast.parse(self.expression, mode=\"eval\")\n result = self._eval_expr(tree.body)\n\n formatted_result = f\"{float(result):.6f}\".rstrip(\"0\").rstrip(\".\")\n self.log(f\"Calculation result: {formatted_result}\")\n\n self.status = formatted_result\n return Data(data={\"result\": formatted_result})\n\n except ZeroDivisionError:\n error_message = \"Error: Division by zero\"\n self.status = error_message\n return Data(data={\"error\": error_message, \"input\": self.expression})\n\n except (SyntaxError, TypeError, KeyError, ValueError, AttributeError, OverflowError) as e:\n error_message = f\"Invalid expression: {e!s}\"\n self.status = error_message\n return Data(data={\"error\": error_message, \"input\": self.expression})\n\n def build(self):\n \"\"\"Return the main evaluation function.\"\"\"\n return self.evaluate_expression\n"
},
"expression": {
"_input_type": "MessageTextInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Search agent.json b/src/backend/base/langflow/initial_setup/starter_projects/Search agent.json
index cfb434b8d..6b8b47221 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Search agent.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Search agent.json
@@ -158,7 +158,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.custom import Component\nfrom langflow.io import (\n MessageTextInput,\n Output,\n SecretStrInput,\n)\nfrom langflow.schema import Data\n\n\nclass ScrapeGraphSearchApi(Component):\n display_name: str = \"ScrapeGraphSearchApi\"\n description: str = \"\"\"ScrapeGraph Search API.\n Given a search prompt, it will return search results using ScrapeGraph's search functionality.\n More info at https://docs.scrapegraphai.com/services/searchscraper\"\"\"\n name = \"ScrapeGraphSearchApi\"\n\n documentation: str = \"https://docs.scrapegraphai.com/introduction\"\n icon = \"ScrapeGraph\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"ScrapeGraph API Key\",\n required=True,\n password=True,\n info=\"The API key to use ScrapeGraph API.\",\n ),\n MessageTextInput(\n name=\"user_prompt\",\n display_name=\"Search Prompt\",\n tool_mode=True,\n info=\"The search prompt to use.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Data\", name=\"data\", method=\"search\"),\n ]\n\n def search(self) -> list[Data]:\n try:\n from scrapegraph_py import Client\n from scrapegraph_py.logger import sgai_logger\n except ImportError as e:\n msg = \"Could not import scrapegraph-py package. Please install it with `pip install scrapegraph-py`.\"\n raise ImportError(msg) from e\n\n # Set logging level\n sgai_logger.set_logging(level=\"INFO\")\n\n # Initialize the client with API key\n sgai_client = Client(api_key=self.api_key)\n\n try:\n # SearchScraper request\n response = sgai_client.searchscraper(\n user_prompt=self.user_prompt,\n )\n\n # Close the client\n sgai_client.close()\n\n return Data(data=response)\n except Exception:\n sgai_client.close()\n raise\n"
+ "value": "from langflow.custom.custom_component.component import Component\nfrom langflow.io import (\n MessageTextInput,\n Output,\n SecretStrInput,\n)\nfrom langflow.schema.data import Data\n\n\nclass ScrapeGraphSearchApi(Component):\n display_name: str = \"ScrapeGraphSearchApi\"\n description: str = \"\"\"ScrapeGraph Search API.\n Given a search prompt, it will return search results using ScrapeGraph's search functionality.\n More info at https://docs.scrapegraphai.com/services/searchscraper\"\"\"\n name = \"ScrapeGraphSearchApi\"\n\n documentation: str = \"https://docs.scrapegraphai.com/introduction\"\n icon = \"ScrapeGraph\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"ScrapeGraph API Key\",\n required=True,\n password=True,\n info=\"The API key to use ScrapeGraph API.\",\n ),\n MessageTextInput(\n name=\"user_prompt\",\n display_name=\"Search Prompt\",\n tool_mode=True,\n info=\"The search prompt to use.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Data\", name=\"data\", method=\"search\"),\n ]\n\n def search(self) -> list[Data]:\n try:\n from scrapegraph_py import Client\n from scrapegraph_py.logger import sgai_logger\n except ImportError as e:\n msg = \"Could not import scrapegraph-py package. Please install it with `pip install scrapegraph-py`.\"\n raise ImportError(msg) from e\n\n # Set logging level\n sgai_logger.set_logging(level=\"INFO\")\n\n # Initialize the client with API key\n sgai_client = Client(api_key=self.api_key)\n\n try:\n # SearchScraper request\n response = sgai_client.searchscraper(\n user_prompt=self.user_prompt,\n )\n\n # Close the client\n sgai_client.close()\n\n return Data(data=response)\n except Exception:\n sgai_client.close()\n raise\n"
},
"tools_metadata": {
"_input_type": "TableInput",
@@ -419,7 +419,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
+ "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"_input_type": "FileInput",
@@ -747,7 +747,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
@@ -1210,7 +1210,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
+ "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers.current_date import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
},
"handle_parsing_errors": {
"_input_type": "BoolInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Sequential Tasks Agents.json b/src/backend/base/langflow/initial_setup/starter_projects/Sequential Tasks Agents.json
index a1f6b7a91..eca9a979b 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Sequential Tasks Agents.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Sequential Tasks Agents.json
@@ -486,7 +486,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
+ "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers.current_date import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
},
"handle_parsing_errors": {
"_input_type": "BoolInput",
@@ -1153,7 +1153,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
+ "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers.current_date import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
},
"handle_parsing_errors": {
"_input_type": "BoolInput",
@@ -1717,7 +1717,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"template": {
"_input_type": "PromptInput",
@@ -1851,7 +1851,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"template": {
"_input_type": "PromptInput",
@@ -1988,7 +1988,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"finance_agent_output": {
"advanced": false,
@@ -2210,7 +2210,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
+ "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"_input_type": "FileInput",
@@ -2646,7 +2646,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
+ "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers.current_date import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
},
"handle_parsing_errors": {
"_input_type": "BoolInput",
@@ -3282,7 +3282,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "import ast\nimport pprint\nfrom enum import Enum\n\nimport yfinance as yf\nfrom langchain_core.tools import ToolException\nfrom loguru import logger\nfrom pydantic import BaseModel, Field\n\nfrom langflow.custom import Component\nfrom langflow.inputs import DropdownInput, IntInput, MessageTextInput\nfrom langflow.io import Output\nfrom langflow.schema import Data, DataFrame\n\n\nclass YahooFinanceMethod(Enum):\n GET_INFO = \"get_info\"\n GET_NEWS = \"get_news\"\n GET_ACTIONS = \"get_actions\"\n GET_ANALYSIS = \"get_analysis\"\n GET_BALANCE_SHEET = \"get_balance_sheet\"\n GET_CALENDAR = \"get_calendar\"\n GET_CASHFLOW = \"get_cashflow\"\n GET_INSTITUTIONAL_HOLDERS = \"get_institutional_holders\"\n GET_RECOMMENDATIONS = \"get_recommendations\"\n GET_SUSTAINABILITY = \"get_sustainability\"\n GET_MAJOR_HOLDERS = \"get_major_holders\"\n GET_MUTUALFUND_HOLDERS = \"get_mutualfund_holders\"\n GET_INSIDER_PURCHASES = \"get_insider_purchases\"\n GET_INSIDER_TRANSACTIONS = \"get_insider_transactions\"\n GET_INSIDER_ROSTER_HOLDERS = \"get_insider_roster_holders\"\n GET_DIVIDENDS = \"get_dividends\"\n GET_CAPITAL_GAINS = \"get_capital_gains\"\n GET_SPLITS = \"get_splits\"\n GET_SHARES = \"get_shares\"\n GET_FAST_INFO = \"get_fast_info\"\n GET_SEC_FILINGS = \"get_sec_filings\"\n GET_RECOMMENDATIONS_SUMMARY = \"get_recommendations_summary\"\n GET_UPGRADES_DOWNGRADES = \"get_upgrades_downgrades\"\n GET_EARNINGS = \"get_earnings\"\n GET_INCOME_STMT = \"get_income_stmt\"\n\n\nclass YahooFinanceSchema(BaseModel):\n symbol: str = Field(..., description=\"The stock symbol to retrieve data for.\")\n method: YahooFinanceMethod = Field(YahooFinanceMethod.GET_INFO, description=\"The type of data to retrieve.\")\n num_news: int | None = Field(5, description=\"The number of news articles to retrieve.\")\n\n\nclass YfinanceComponent(Component):\n display_name = \"Yahoo Finance\"\n description = \"\"\"Uses [yfinance](https://pypi.org/project/yfinance/) (unofficial package) \\\nto access financial data and market information from Yahoo Finance.\"\"\"\n icon = \"trending-up\"\n\n inputs = [\n MessageTextInput(\n name=\"symbol\",\n display_name=\"Stock Symbol\",\n info=\"The stock symbol to retrieve data for (e.g., AAPL, GOOG).\",\n tool_mode=True,\n ),\n DropdownInput(\n name=\"method\",\n display_name=\"Data Method\",\n info=\"The type of data to retrieve.\",\n options=list(YahooFinanceMethod),\n value=\"get_news\",\n ),\n IntInput(\n name=\"num_news\",\n display_name=\"Number of News\",\n info=\"The number of news articles to retrieve (only applicable for get_news).\",\n value=5,\n ),\n ]\n\n outputs = [\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"fetch_content_dataframe\"),\n ]\n\n def run_model(self) -> DataFrame:\n return self.fetch_content_dataframe()\n\n def _fetch_yfinance_data(self, ticker: yf.Ticker, method: YahooFinanceMethod, num_news: int | None) -> str:\n try:\n if method == YahooFinanceMethod.GET_INFO:\n result = ticker.info\n elif method == YahooFinanceMethod.GET_NEWS:\n result = ticker.news[:num_news]\n else:\n result = getattr(ticker, method.value)()\n return pprint.pformat(result)\n except Exception as e:\n error_message = f\"Error retrieving data: {e}\"\n logger.debug(error_message)\n self.status = error_message\n raise ToolException(error_message) from e\n\n def fetch_content(self) -> list[Data]:\n try:\n return self._yahoo_finance_tool(\n self.symbol,\n YahooFinanceMethod(self.method),\n self.num_news,\n )\n except ToolException:\n raise\n except Exception as e:\n 
error_message = f\"Unexpected error: {e}\"\n logger.debug(error_message)\n self.status = error_message\n raise ToolException(error_message) from e\n\n def _yahoo_finance_tool(\n self,\n symbol: str,\n method: YahooFinanceMethod,\n num_news: int | None = 5,\n ) -> list[Data]:\n ticker = yf.Ticker(symbol)\n result = self._fetch_yfinance_data(ticker, method, num_news)\n\n if method == YahooFinanceMethod.GET_NEWS:\n data_list = [\n Data(text=f\"{article['title']}: {article['link']}\", data=article)\n for article in ast.literal_eval(result)\n ]\n else:\n data_list = [Data(text=result, data={\"result\": result})]\n\n return data_list\n\n def fetch_content_dataframe(self) -> DataFrame:\n data = self.fetch_content()\n return DataFrame(data)\n"
+ "value": "import ast\nimport pprint\nfrom enum import Enum\n\nimport yfinance as yf\nfrom langchain_core.tools import ToolException\nfrom loguru import logger\nfrom pydantic import BaseModel, Field\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DropdownInput, IntInput, MessageTextInput\nfrom langflow.io import Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\n\n\nclass YahooFinanceMethod(Enum):\n GET_INFO = \"get_info\"\n GET_NEWS = \"get_news\"\n GET_ACTIONS = \"get_actions\"\n GET_ANALYSIS = \"get_analysis\"\n GET_BALANCE_SHEET = \"get_balance_sheet\"\n GET_CALENDAR = \"get_calendar\"\n GET_CASHFLOW = \"get_cashflow\"\n GET_INSTITUTIONAL_HOLDERS = \"get_institutional_holders\"\n GET_RECOMMENDATIONS = \"get_recommendations\"\n GET_SUSTAINABILITY = \"get_sustainability\"\n GET_MAJOR_HOLDERS = \"get_major_holders\"\n GET_MUTUALFUND_HOLDERS = \"get_mutualfund_holders\"\n GET_INSIDER_PURCHASES = \"get_insider_purchases\"\n GET_INSIDER_TRANSACTIONS = \"get_insider_transactions\"\n GET_INSIDER_ROSTER_HOLDERS = \"get_insider_roster_holders\"\n GET_DIVIDENDS = \"get_dividends\"\n GET_CAPITAL_GAINS = \"get_capital_gains\"\n GET_SPLITS = \"get_splits\"\n GET_SHARES = \"get_shares\"\n GET_FAST_INFO = \"get_fast_info\"\n GET_SEC_FILINGS = \"get_sec_filings\"\n GET_RECOMMENDATIONS_SUMMARY = \"get_recommendations_summary\"\n GET_UPGRADES_DOWNGRADES = \"get_upgrades_downgrades\"\n GET_EARNINGS = \"get_earnings\"\n GET_INCOME_STMT = \"get_income_stmt\"\n\n\nclass YahooFinanceSchema(BaseModel):\n symbol: str = Field(..., description=\"The stock symbol to retrieve data for.\")\n method: YahooFinanceMethod = Field(YahooFinanceMethod.GET_INFO, description=\"The type of data to retrieve.\")\n num_news: int | None = Field(5, description=\"The number of news articles to retrieve.\")\n\n\nclass YfinanceComponent(Component):\n display_name = \"Yahoo Finance\"\n description = \"\"\"Uses [yfinance](https://pypi.org/project/yfinance/) (unofficial package) \\\nto access financial data and market information from Yahoo Finance.\"\"\"\n icon = \"trending-up\"\n\n inputs = [\n MessageTextInput(\n name=\"symbol\",\n display_name=\"Stock Symbol\",\n info=\"The stock symbol to retrieve data for (e.g., AAPL, GOOG).\",\n tool_mode=True,\n ),\n DropdownInput(\n name=\"method\",\n display_name=\"Data Method\",\n info=\"The type of data to retrieve.\",\n options=list(YahooFinanceMethod),\n value=\"get_news\",\n ),\n IntInput(\n name=\"num_news\",\n display_name=\"Number of News\",\n info=\"The number of news articles to retrieve (only applicable for get_news).\",\n value=5,\n ),\n ]\n\n outputs = [\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"fetch_content_dataframe\"),\n ]\n\n def run_model(self) -> DataFrame:\n return self.fetch_content_dataframe()\n\n def _fetch_yfinance_data(self, ticker: yf.Ticker, method: YahooFinanceMethod, num_news: int | None) -> str:\n try:\n if method == YahooFinanceMethod.GET_INFO:\n result = ticker.info\n elif method == YahooFinanceMethod.GET_NEWS:\n result = ticker.news[:num_news]\n else:\n result = getattr(ticker, method.value)()\n return pprint.pformat(result)\n except Exception as e:\n error_message = f\"Error retrieving data: {e}\"\n logger.debug(error_message)\n self.status = error_message\n raise ToolException(error_message) from e\n\n def fetch_content(self) -> list[Data]:\n try:\n return self._yahoo_finance_tool(\n self.symbol,\n YahooFinanceMethod(self.method),\n 
self.num_news,\n )\n except ToolException:\n raise\n except Exception as e:\n error_message = f\"Unexpected error: {e}\"\n logger.debug(error_message)\n self.status = error_message\n raise ToolException(error_message) from e\n\n def _yahoo_finance_tool(\n self,\n symbol: str,\n method: YahooFinanceMethod,\n num_news: int | None = 5,\n ) -> list[Data]:\n ticker = yf.Ticker(symbol)\n result = self._fetch_yfinance_data(ticker, method, num_news)\n\n if method == YahooFinanceMethod.GET_NEWS:\n data_list = [\n Data(text=f\"{article['title']}: {article['link']}\", data=article)\n for article in ast.literal_eval(result)\n ]\n else:\n data_list = [Data(text=result, data={\"result\": result})]\n\n return data_list\n\n def fetch_content_dataframe(self) -> DataFrame:\n data = self.fetch_content()\n return DataFrame(data)\n"
},
"method": {
"_input_type": "DropdownInput",
@@ -3571,7 +3571,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "import ast\nimport operator\nfrom collections.abc import Callable\n\nfrom langflow.custom import Component\nfrom langflow.inputs import MessageTextInput\nfrom langflow.io import Output\nfrom langflow.schema import Data\n\n\nclass CalculatorComponent(Component):\n display_name = \"Calculator\"\n description = \"Perform basic arithmetic operations on a given expression.\"\n icon = \"calculator\"\n\n # Cache operators dictionary as a class variable\n OPERATORS: dict[type[ast.operator], Callable] = {\n ast.Add: operator.add,\n ast.Sub: operator.sub,\n ast.Mult: operator.mul,\n ast.Div: operator.truediv,\n ast.Pow: operator.pow,\n }\n\n inputs = [\n MessageTextInput(\n name=\"expression\",\n display_name=\"Expression\",\n info=\"The arithmetic expression to evaluate (e.g., '4*4*(33/22)+12-20').\",\n tool_mode=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Data\", name=\"result\", type_=Data, method=\"evaluate_expression\"),\n ]\n\n def _eval_expr(self, node: ast.AST) -> float:\n \"\"\"Evaluate an AST node recursively.\"\"\"\n if isinstance(node, ast.Constant):\n if isinstance(node.value, int | float):\n return float(node.value)\n error_msg = f\"Unsupported constant type: {type(node.value).__name__}\"\n raise TypeError(error_msg)\n if isinstance(node, ast.Num): # For backwards compatibility\n if isinstance(node.n, int | float):\n return float(node.n)\n error_msg = f\"Unsupported number type: {type(node.n).__name__}\"\n raise TypeError(error_msg)\n\n if isinstance(node, ast.BinOp):\n op_type = type(node.op)\n if op_type not in self.OPERATORS:\n error_msg = f\"Unsupported binary operator: {op_type.__name__}\"\n raise TypeError(error_msg)\n\n left = self._eval_expr(node.left)\n right = self._eval_expr(node.right)\n return self.OPERATORS[op_type](left, right)\n\n error_msg = f\"Unsupported operation or expression type: {type(node).__name__}\"\n raise TypeError(error_msg)\n\n def evaluate_expression(self) -> Data:\n \"\"\"Evaluate the mathematical expression and return the result.\"\"\"\n try:\n tree = ast.parse(self.expression, mode=\"eval\")\n result = self._eval_expr(tree.body)\n\n formatted_result = f\"{float(result):.6f}\".rstrip(\"0\").rstrip(\".\")\n self.log(f\"Calculation result: {formatted_result}\")\n\n self.status = formatted_result\n return Data(data={\"result\": formatted_result})\n\n except ZeroDivisionError:\n error_message = \"Error: Division by zero\"\n self.status = error_message\n return Data(data={\"error\": error_message, \"input\": self.expression})\n\n except (SyntaxError, TypeError, KeyError, ValueError, AttributeError, OverflowError) as e:\n error_message = f\"Invalid expression: {e!s}\"\n self.status = error_message\n return Data(data={\"error\": error_message, \"input\": self.expression})\n\n def build(self):\n \"\"\"Return the main evaluation function.\"\"\"\n return self.evaluate_expression\n"
+ "value": "import ast\nimport operator\nfrom collections.abc import Callable\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import MessageTextInput\nfrom langflow.io import Output\nfrom langflow.schema.data import Data\n\n\nclass CalculatorComponent(Component):\n display_name = \"Calculator\"\n description = \"Perform basic arithmetic operations on a given expression.\"\n icon = \"calculator\"\n\n # Cache operators dictionary as a class variable\n OPERATORS: dict[type[ast.operator], Callable] = {\n ast.Add: operator.add,\n ast.Sub: operator.sub,\n ast.Mult: operator.mul,\n ast.Div: operator.truediv,\n ast.Pow: operator.pow,\n }\n\n inputs = [\n MessageTextInput(\n name=\"expression\",\n display_name=\"Expression\",\n info=\"The arithmetic expression to evaluate (e.g., '4*4*(33/22)+12-20').\",\n tool_mode=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Data\", name=\"result\", type_=Data, method=\"evaluate_expression\"),\n ]\n\n def _eval_expr(self, node: ast.AST) -> float:\n \"\"\"Evaluate an AST node recursively.\"\"\"\n if isinstance(node, ast.Constant):\n if isinstance(node.value, int | float):\n return float(node.value)\n error_msg = f\"Unsupported constant type: {type(node.value).__name__}\"\n raise TypeError(error_msg)\n if isinstance(node, ast.Num): # For backwards compatibility\n if isinstance(node.n, int | float):\n return float(node.n)\n error_msg = f\"Unsupported number type: {type(node.n).__name__}\"\n raise TypeError(error_msg)\n\n if isinstance(node, ast.BinOp):\n op_type = type(node.op)\n if op_type not in self.OPERATORS:\n error_msg = f\"Unsupported binary operator: {op_type.__name__}\"\n raise TypeError(error_msg)\n\n left = self._eval_expr(node.left)\n right = self._eval_expr(node.right)\n return self.OPERATORS[op_type](left, right)\n\n error_msg = f\"Unsupported operation or expression type: {type(node).__name__}\"\n raise TypeError(error_msg)\n\n def evaluate_expression(self) -> Data:\n \"\"\"Evaluate the mathematical expression and return the result.\"\"\"\n try:\n tree = ast.parse(self.expression, mode=\"eval\")\n result = self._eval_expr(tree.body)\n\n formatted_result = f\"{float(result):.6f}\".rstrip(\"0\").rstrip(\".\")\n self.log(f\"Calculation result: {formatted_result}\")\n\n self.status = formatted_result\n return Data(data={\"result\": formatted_result})\n\n except ZeroDivisionError:\n error_message = \"Error: Division by zero\"\n self.status = error_message\n return Data(data={\"error\": error_message, \"input\": self.expression})\n\n except (SyntaxError, TypeError, KeyError, ValueError, AttributeError, OverflowError) as e:\n error_message = f\"Invalid expression: {e!s}\"\n self.status = error_message\n return Data(data={\"error\": error_message, \"input\": self.expression})\n\n def build(self):\n \"\"\"Return the main evaluation function.\"\"\"\n return self.evaluate_expression\n"
},
"expression": {
"_input_type": "MessageTextInput",
@@ -3803,7 +3803,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "import httpx\nfrom loguru import logger\n\nfrom langflow.custom import Component\nfrom langflow.io import BoolInput, DropdownInput, IntInput, MessageTextInput, Output, SecretStrInput\nfrom langflow.schema import Data\nfrom langflow.schema.dataframe import DataFrame\n\n\nclass TavilySearchComponent(Component):\n display_name = \"Tavily Search API\"\n description = \"\"\"**Tavily Search** is a search engine optimized for LLMs and RAG, \\\n aimed at efficient, quick, and persistent search results.\"\"\"\n icon = \"TavilyIcon\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Tavily API Key\",\n required=True,\n info=\"Your Tavily API Key.\",\n ),\n MessageTextInput(\n name=\"query\",\n display_name=\"Search Query\",\n info=\"The search query you want to execute with Tavily.\",\n tool_mode=True,\n ),\n DropdownInput(\n name=\"search_depth\",\n display_name=\"Search Depth\",\n info=\"The depth of the search.\",\n options=[\"basic\", \"advanced\"],\n value=\"advanced\",\n advanced=True,\n ),\n IntInput(\n name=\"chunks_per_source\",\n display_name=\"Chunks Per Source\",\n info=(\"The number of content chunks to retrieve from each source (1-3). Only works with advanced search.\"),\n value=3,\n advanced=True,\n ),\n DropdownInput(\n name=\"topic\",\n display_name=\"Search Topic\",\n info=\"The category of the search.\",\n options=[\"general\", \"news\"],\n value=\"general\",\n advanced=True,\n ),\n IntInput(\n name=\"days\",\n display_name=\"Days\",\n info=\"Number of days back from current date to include. Only available with news topic.\",\n value=7,\n advanced=True,\n ),\n IntInput(\n name=\"max_results\",\n display_name=\"Max Results\",\n info=\"The maximum number of search results to return.\",\n value=5,\n advanced=True,\n ),\n BoolInput(\n name=\"include_answer\",\n display_name=\"Include Answer\",\n info=\"Include a short answer to original query.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"time_range\",\n display_name=\"Time Range\",\n info=\"The time range back from the current date to filter results.\",\n options=[\"day\", \"week\", \"month\", \"year\"],\n value=None, # Default to None to make it optional\n advanced=True,\n ),\n BoolInput(\n name=\"include_images\",\n display_name=\"Include Images\",\n info=\"Include a list of query-related images in the response.\",\n value=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"include_domains\",\n display_name=\"Include Domains\",\n info=\"Comma-separated list of domains to include in the search results.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"exclude_domains\",\n display_name=\"Exclude Domains\",\n info=\"Comma-separated list of domains to exclude from the search results.\",\n advanced=True,\n ),\n BoolInput(\n name=\"include_raw_content\",\n display_name=\"Include Raw Content\",\n info=\"Include the cleaned and parsed HTML content of each search result.\",\n value=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"fetch_content_dataframe\"),\n ]\n\n def fetch_content(self) -> list[Data]:\n try:\n # Only process domains if they're provided\n include_domains = None\n exclude_domains = None\n\n if self.include_domains:\n include_domains = [domain.strip() for domain in self.include_domains.split(\",\") if domain.strip()]\n\n if self.exclude_domains:\n exclude_domains = [domain.strip() for domain in self.exclude_domains.split(\",\") if domain.strip()]\n\n url = \"https://api.tavily.com/search\"\n headers = 
{\n \"content-type\": \"application/json\",\n \"accept\": \"application/json\",\n }\n\n payload = {\n \"api_key\": self.api_key,\n \"query\": self.query,\n \"search_depth\": self.search_depth,\n \"topic\": self.topic,\n \"max_results\": self.max_results,\n \"include_images\": self.include_images,\n \"include_answer\": self.include_answer,\n \"include_raw_content\": self.include_raw_content,\n \"days\": self.days,\n \"time_range\": self.time_range,\n }\n\n # Only add domains to payload if they exist and have values\n if include_domains:\n payload[\"include_domains\"] = include_domains\n if exclude_domains:\n payload[\"exclude_domains\"] = exclude_domains\n\n # Add conditional parameters only if they should be included\n if self.search_depth == \"advanced\" and self.chunks_per_source:\n payload[\"chunks_per_source\"] = self.chunks_per_source\n\n if self.topic == \"news\" and self.days:\n payload[\"days\"] = int(self.days) # Ensure days is an integer\n\n # Add time_range if it's set\n if hasattr(self, \"time_range\") and self.time_range:\n payload[\"time_range\"] = self.time_range\n\n # Add timeout handling\n with httpx.Client(timeout=90.0) as client:\n response = client.post(url, json=payload, headers=headers)\n\n response.raise_for_status()\n search_results = response.json()\n\n data_results = []\n\n if self.include_answer and search_results.get(\"answer\"):\n data_results.append(Data(text=search_results[\"answer\"]))\n\n for result in search_results.get(\"results\", []):\n content = result.get(\"content\", \"\")\n result_data = {\n \"title\": result.get(\"title\"),\n \"url\": result.get(\"url\"),\n \"content\": content,\n \"score\": result.get(\"score\"),\n }\n if self.include_raw_content:\n result_data[\"raw_content\"] = result.get(\"raw_content\")\n\n data_results.append(Data(text=content, data=result_data))\n\n if self.include_images and search_results.get(\"images\"):\n data_results.append(Data(text=\"Images found\", data={\"images\": search_results[\"images\"]}))\n\n except httpx.TimeoutException:\n error_message = \"Request timed out (90s). Please try again or adjust parameters.\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except httpx.HTTPStatusError as exc:\n error_message = f\"HTTP error occurred: {exc.response.status_code} - {exc.response.text}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except httpx.RequestError as exc:\n error_message = f\"Request error occurred: {exc}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except ValueError as exc:\n error_message = f\"Invalid response format: {exc}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n else:\n self.status = data_results\n return data_results\n\n def fetch_content_dataframe(self) -> DataFrame:\n data = self.fetch_content()\n return DataFrame(data)\n"
+ "value": "import httpx\nfrom loguru import logger\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.template.field.base import Output\n\n\nclass TavilySearchComponent(Component):\n display_name = \"Tavily Search API\"\n description = \"\"\"**Tavily Search** is a search engine optimized for LLMs and RAG, \\\n aimed at efficient, quick, and persistent search results.\"\"\"\n icon = \"TavilyIcon\"\n\n inputs = [\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Tavily API Key\",\n required=True,\n info=\"Your Tavily API Key.\",\n ),\n MessageTextInput(\n name=\"query\",\n display_name=\"Search Query\",\n info=\"The search query you want to execute with Tavily.\",\n tool_mode=True,\n ),\n DropdownInput(\n name=\"search_depth\",\n display_name=\"Search Depth\",\n info=\"The depth of the search.\",\n options=[\"basic\", \"advanced\"],\n value=\"advanced\",\n advanced=True,\n ),\n IntInput(\n name=\"chunks_per_source\",\n display_name=\"Chunks Per Source\",\n info=(\"The number of content chunks to retrieve from each source (1-3). Only works with advanced search.\"),\n value=3,\n advanced=True,\n ),\n DropdownInput(\n name=\"topic\",\n display_name=\"Search Topic\",\n info=\"The category of the search.\",\n options=[\"general\", \"news\"],\n value=\"general\",\n advanced=True,\n ),\n IntInput(\n name=\"days\",\n display_name=\"Days\",\n info=\"Number of days back from current date to include. Only available with news topic.\",\n value=7,\n advanced=True,\n ),\n IntInput(\n name=\"max_results\",\n display_name=\"Max Results\",\n info=\"The maximum number of search results to return.\",\n value=5,\n advanced=True,\n ),\n BoolInput(\n name=\"include_answer\",\n display_name=\"Include Answer\",\n info=\"Include a short answer to original query.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"time_range\",\n display_name=\"Time Range\",\n info=\"The time range back from the current date to filter results.\",\n options=[\"day\", \"week\", \"month\", \"year\"],\n value=None, # Default to None to make it optional\n advanced=True,\n ),\n BoolInput(\n name=\"include_images\",\n display_name=\"Include Images\",\n info=\"Include a list of query-related images in the response.\",\n value=True,\n advanced=True,\n ),\n MessageTextInput(\n name=\"include_domains\",\n display_name=\"Include Domains\",\n info=\"Comma-separated list of domains to include in the search results.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"exclude_domains\",\n display_name=\"Exclude Domains\",\n info=\"Comma-separated list of domains to exclude from the search results.\",\n advanced=True,\n ),\n BoolInput(\n name=\"include_raw_content\",\n display_name=\"Include Raw Content\",\n info=\"Include the cleaned and parsed HTML content of each search result.\",\n value=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"fetch_content_dataframe\"),\n ]\n\n def fetch_content(self) -> list[Data]:\n try:\n # Only process domains if they're provided\n include_domains = None\n exclude_domains = None\n\n if self.include_domains:\n include_domains = [domain.strip() for domain in self.include_domains.split(\",\") if domain.strip()]\n\n if self.exclude_domains:\n exclude_domains = [domain.strip() for domain in 
self.exclude_domains.split(\",\") if domain.strip()]\n\n url = \"https://api.tavily.com/search\"\n headers = {\n \"content-type\": \"application/json\",\n \"accept\": \"application/json\",\n }\n\n payload = {\n \"api_key\": self.api_key,\n \"query\": self.query,\n \"search_depth\": self.search_depth,\n \"topic\": self.topic,\n \"max_results\": self.max_results,\n \"include_images\": self.include_images,\n \"include_answer\": self.include_answer,\n \"include_raw_content\": self.include_raw_content,\n \"days\": self.days,\n \"time_range\": self.time_range,\n }\n\n # Only add domains to payload if they exist and have values\n if include_domains:\n payload[\"include_domains\"] = include_domains\n if exclude_domains:\n payload[\"exclude_domains\"] = exclude_domains\n\n # Add conditional parameters only if they should be included\n if self.search_depth == \"advanced\" and self.chunks_per_source:\n payload[\"chunks_per_source\"] = self.chunks_per_source\n\n if self.topic == \"news\" and self.days:\n payload[\"days\"] = int(self.days) # Ensure days is an integer\n\n # Add time_range if it's set\n if hasattr(self, \"time_range\") and self.time_range:\n payload[\"time_range\"] = self.time_range\n\n # Add timeout handling\n with httpx.Client(timeout=90.0) as client:\n response = client.post(url, json=payload, headers=headers)\n\n response.raise_for_status()\n search_results = response.json()\n\n data_results = []\n\n if self.include_answer and search_results.get(\"answer\"):\n data_results.append(Data(text=search_results[\"answer\"]))\n\n for result in search_results.get(\"results\", []):\n content = result.get(\"content\", \"\")\n result_data = {\n \"title\": result.get(\"title\"),\n \"url\": result.get(\"url\"),\n \"content\": content,\n \"score\": result.get(\"score\"),\n }\n if self.include_raw_content:\n result_data[\"raw_content\"] = result.get(\"raw_content\")\n\n data_results.append(Data(text=content, data=result_data))\n\n if self.include_images and search_results.get(\"images\"):\n data_results.append(Data(text=\"Images found\", data={\"images\": search_results[\"images\"]}))\n\n except httpx.TimeoutException:\n error_message = \"Request timed out (90s). Please try again or adjust parameters.\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except httpx.HTTPStatusError as exc:\n error_message = f\"HTTP error occurred: {exc.response.status_code} - {exc.response.text}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except httpx.RequestError as exc:\n error_message = f\"Request error occurred: {exc}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n except ValueError as exc:\n error_message = f\"Invalid response format: {exc}\"\n logger.error(error_message)\n return [Data(text=error_message, data={\"error\": error_message})]\n else:\n self.status = data_results\n return data_results\n\n def fetch_content_dataframe(self) -> DataFrame:\n data = self.fetch_content()\n return DataFrame(data)\n"
},
"days": {
"_input_type": "IntInput",
@@ -4298,7 +4298,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Simple Agent.json b/src/backend/base/langflow/initial_setup/starter_projects/Simple Agent.json
index 5033bc746..3fd61ef89 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Simple Agent.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Simple Agent.json
@@ -482,7 +482,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "import ast\nimport operator\nfrom collections.abc import Callable\n\nfrom langflow.custom import Component\nfrom langflow.inputs import MessageTextInput\nfrom langflow.io import Output\nfrom langflow.schema import Data\n\n\nclass CalculatorComponent(Component):\n display_name = \"Calculator\"\n description = \"Perform basic arithmetic operations on a given expression.\"\n icon = \"calculator\"\n\n # Cache operators dictionary as a class variable\n OPERATORS: dict[type[ast.operator], Callable] = {\n ast.Add: operator.add,\n ast.Sub: operator.sub,\n ast.Mult: operator.mul,\n ast.Div: operator.truediv,\n ast.Pow: operator.pow,\n }\n\n inputs = [\n MessageTextInput(\n name=\"expression\",\n display_name=\"Expression\",\n info=\"The arithmetic expression to evaluate (e.g., '4*4*(33/22)+12-20').\",\n tool_mode=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Data\", name=\"result\", type_=Data, method=\"evaluate_expression\"),\n ]\n\n def _eval_expr(self, node: ast.AST) -> float:\n \"\"\"Evaluate an AST node recursively.\"\"\"\n if isinstance(node, ast.Constant):\n if isinstance(node.value, int | float):\n return float(node.value)\n error_msg = f\"Unsupported constant type: {type(node.value).__name__}\"\n raise TypeError(error_msg)\n if isinstance(node, ast.Num): # For backwards compatibility\n if isinstance(node.n, int | float):\n return float(node.n)\n error_msg = f\"Unsupported number type: {type(node.n).__name__}\"\n raise TypeError(error_msg)\n\n if isinstance(node, ast.BinOp):\n op_type = type(node.op)\n if op_type not in self.OPERATORS:\n error_msg = f\"Unsupported binary operator: {op_type.__name__}\"\n raise TypeError(error_msg)\n\n left = self._eval_expr(node.left)\n right = self._eval_expr(node.right)\n return self.OPERATORS[op_type](left, right)\n\n error_msg = f\"Unsupported operation or expression type: {type(node).__name__}\"\n raise TypeError(error_msg)\n\n def evaluate_expression(self) -> Data:\n \"\"\"Evaluate the mathematical expression and return the result.\"\"\"\n try:\n tree = ast.parse(self.expression, mode=\"eval\")\n result = self._eval_expr(tree.body)\n\n formatted_result = f\"{float(result):.6f}\".rstrip(\"0\").rstrip(\".\")\n self.log(f\"Calculation result: {formatted_result}\")\n\n self.status = formatted_result\n return Data(data={\"result\": formatted_result})\n\n except ZeroDivisionError:\n error_message = \"Error: Division by zero\"\n self.status = error_message\n return Data(data={\"error\": error_message, \"input\": self.expression})\n\n except (SyntaxError, TypeError, KeyError, ValueError, AttributeError, OverflowError) as e:\n error_message = f\"Invalid expression: {e!s}\"\n self.status = error_message\n return Data(data={\"error\": error_message, \"input\": self.expression})\n\n def build(self):\n \"\"\"Return the main evaluation function.\"\"\"\n return self.evaluate_expression\n"
+ "value": "import ast\nimport operator\nfrom collections.abc import Callable\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import MessageTextInput\nfrom langflow.io import Output\nfrom langflow.schema.data import Data\n\n\nclass CalculatorComponent(Component):\n display_name = \"Calculator\"\n description = \"Perform basic arithmetic operations on a given expression.\"\n icon = \"calculator\"\n\n # Cache operators dictionary as a class variable\n OPERATORS: dict[type[ast.operator], Callable] = {\n ast.Add: operator.add,\n ast.Sub: operator.sub,\n ast.Mult: operator.mul,\n ast.Div: operator.truediv,\n ast.Pow: operator.pow,\n }\n\n inputs = [\n MessageTextInput(\n name=\"expression\",\n display_name=\"Expression\",\n info=\"The arithmetic expression to evaluate (e.g., '4*4*(33/22)+12-20').\",\n tool_mode=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Data\", name=\"result\", type_=Data, method=\"evaluate_expression\"),\n ]\n\n def _eval_expr(self, node: ast.AST) -> float:\n \"\"\"Evaluate an AST node recursively.\"\"\"\n if isinstance(node, ast.Constant):\n if isinstance(node.value, int | float):\n return float(node.value)\n error_msg = f\"Unsupported constant type: {type(node.value).__name__}\"\n raise TypeError(error_msg)\n if isinstance(node, ast.Num): # For backwards compatibility\n if isinstance(node.n, int | float):\n return float(node.n)\n error_msg = f\"Unsupported number type: {type(node.n).__name__}\"\n raise TypeError(error_msg)\n\n if isinstance(node, ast.BinOp):\n op_type = type(node.op)\n if op_type not in self.OPERATORS:\n error_msg = f\"Unsupported binary operator: {op_type.__name__}\"\n raise TypeError(error_msg)\n\n left = self._eval_expr(node.left)\n right = self._eval_expr(node.right)\n return self.OPERATORS[op_type](left, right)\n\n error_msg = f\"Unsupported operation or expression type: {type(node).__name__}\"\n raise TypeError(error_msg)\n\n def evaluate_expression(self) -> Data:\n \"\"\"Evaluate the mathematical expression and return the result.\"\"\"\n try:\n tree = ast.parse(self.expression, mode=\"eval\")\n result = self._eval_expr(tree.body)\n\n formatted_result = f\"{float(result):.6f}\".rstrip(\"0\").rstrip(\".\")\n self.log(f\"Calculation result: {formatted_result}\")\n\n self.status = formatted_result\n return Data(data={\"result\": formatted_result})\n\n except ZeroDivisionError:\n error_message = \"Error: Division by zero\"\n self.status = error_message\n return Data(data={\"error\": error_message, \"input\": self.expression})\n\n except (SyntaxError, TypeError, KeyError, ValueError, AttributeError, OverflowError) as e:\n error_message = f\"Invalid expression: {e!s}\"\n self.status = error_message\n return Data(data={\"error\": error_message, \"input\": self.expression})\n\n def build(self):\n \"\"\"Return the main evaluation function.\"\"\"\n return self.evaluate_expression\n"
},
"expression": {
"_input_type": "MessageTextInput",
@@ -679,7 +679,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
+ "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"_input_type": "FileInput",
@@ -1011,7 +1011,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
@@ -1392,7 +1392,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
+ "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers.current_date import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
},
"handle_parsing_errors": {
"_input_type": "BoolInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Social Media Agent.json b/src/backend/base/langflow/initial_setup/starter_projects/Social Media Agent.json
index 398a56ccf..95e72213e 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Social Media Agent.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Social Media Agent.json
@@ -232,7 +232,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "import json\nimport string\nfrom typing import Any, cast\n\nfrom apify_client import ApifyClient\nfrom langchain_community.document_loaders.apify_dataset import ApifyDatasetLoader\nfrom langchain_core.tools import BaseTool\nfrom pydantic import BaseModel, Field, field_serializer\n\nfrom langflow.custom import Component\nfrom langflow.field_typing import Tool\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import MultilineInput, Output, SecretStrInput, StrInput\nfrom langflow.schema import Data\n\nMAX_DESCRIPTION_LEN = 250\n\n\nclass ApifyActorsComponent(Component):\n display_name = \"Apify Actors\"\n description = (\n \"Use Apify Actors to extract data from hundreds of places fast. \"\n \"This component can be used in a flow to retrieve data or as a tool with an agent.\"\n )\n documentation: str = \"http://docs.langflow.org/integrations-apify\"\n icon = \"Apify\"\n name = \"ApifyActors\"\n\n inputs = [\n SecretStrInput(\n name=\"apify_token\",\n display_name=\"Apify Token\",\n info=\"The API token for the Apify account.\",\n required=True,\n password=True,\n ),\n StrInput(\n name=\"actor_id\",\n display_name=\"Actor\",\n info=(\n \"Actor name from Apify store to run. For example 'apify/website-content-crawler' \"\n \"to use the Website Content Crawler Actor.\"\n ),\n value=\"apify/website-content-crawler\",\n required=True,\n ),\n # multiline input is more pleasant to use than the nested dict input\n MultilineInput(\n name=\"run_input\",\n display_name=\"Run input\",\n info=(\n 'The JSON input for the Actor run. For example for the \"apify/website-content-crawler\" Actor: '\n '{\"startUrls\":[{\"url\":\"https://docs.apify.com/academy/web-scraping-for-beginners\"}],\"maxCrawlDepth\":0}'\n ),\n value='{\"startUrls\":[{\"url\":\"https://docs.apify.com/academy/web-scraping-for-beginners\"}],\"maxCrawlDepth\":0}',\n required=True,\n ),\n MultilineInput(\n name=\"dataset_fields\",\n display_name=\"Output fields\",\n info=(\n \"Fields to extract from the dataset, split by commas. \"\n \"Other fields will be ignored. Dots in nested structures will be replaced by underscores. \"\n \"Sample input: 'text, metadata.title'. \"\n \"Sample output: {'text': 'page content here', 'metadata_title': 'page title here'}. \"\n \"For example, for the 'apify/website-content-crawler' Actor, you can extract the 'markdown' field, \"\n \"which is the content of the website in markdown format.\"\n ),\n ),\n BoolInput(\n name=\"flatten_dataset\",\n display_name=\"Flatten output\",\n info=(\n \"The output dataset will be converted from a nested format to a flat structure. \"\n \"Dots in nested structure will be replaced by underscores. \"\n \"This is useful for further processing of the Data object. 
\"\n \"For example, {'a': {'b': 1}} will be flattened to {'a_b': 1}.\"\n ),\n ),\n ]\n\n outputs = [\n Output(display_name=\"Output\", name=\"output\", type_=list[Data], method=\"run_model\"),\n Output(display_name=\"Tool\", name=\"tool\", type_=Tool, method=\"build_tool\"),\n ]\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self._apify_client: ApifyClient | None = None\n\n def run_model(self) -> list[Data]:\n \"\"\"Run the Actor and return node output.\"\"\"\n input_ = json.loads(self.run_input)\n fields = ApifyActorsComponent.parse_dataset_fields(self.dataset_fields) if self.dataset_fields else None\n res = self._run_actor(self.actor_id, input_, fields=fields)\n if self.flatten_dataset:\n res = [ApifyActorsComponent.flatten(item) for item in res]\n data = [Data(data=item) for item in res]\n\n self.status = data\n return data\n\n def build_tool(self) -> Tool:\n \"\"\"Build a tool for an agent that runs the Apify Actor.\"\"\"\n actor_id = self.actor_id\n\n build = self._get_actor_latest_build(actor_id)\n readme = build.get(\"readme\", \"\")[:250] + \"...\"\n if not (input_schema_str := build.get(\"inputSchema\")):\n msg = \"Input schema not found\"\n raise ValueError(msg)\n input_schema = json.loads(input_schema_str)\n properties, required = ApifyActorsComponent.get_actor_input_schema_from_build(input_schema)\n properties = {\"run_input\": properties}\n\n # works from input schema\n info_ = [\n (\n \"JSON encoded as a string with input schema (STRICTLY FOLLOW JSON FORMAT AND SCHEMA):\\n\\n\"\n f\"{json.dumps(properties, separators=(',', ':'))}\"\n )\n ]\n if required:\n info_.append(\"\\n\\nRequired fields:\\n\" + \"\\n\".join(required))\n\n info = \"\".join(info_)\n\n input_model_cls = ApifyActorsComponent.create_input_model_class(info)\n tool_cls = ApifyActorsComponent.create_tool_class(self, readme, input_model_cls, actor_id)\n\n return cast(\"Tool\", tool_cls())\n\n @staticmethod\n def create_tool_class(\n parent: \"ApifyActorsComponent\", readme: str, input_model: type[BaseModel], actor_id: str\n ) -> type[BaseTool]:\n \"\"\"Create a tool class that runs an Apify Actor.\"\"\"\n\n class ApifyActorRun(BaseTool):\n \"\"\"Tool that runs Apify Actors.\"\"\"\n\n name: str = f\"apify_actor_{ApifyActorsComponent.actor_id_to_tool_name(actor_id)}\"\n description: str = (\n \"Run an Apify Actor with the given input. 
\"\n \"Here is a part of the currently loaded Actor README:\\n\\n\"\n f\"{readme}\\n\\n\"\n )\n\n args_schema: type[BaseModel] = input_model\n\n @field_serializer(\"args_schema\")\n def serialize_args_schema(self, args_schema):\n return args_schema.schema()\n\n def _run(self, run_input: str | dict) -> str:\n \"\"\"Use the Apify Actor.\"\"\"\n input_dict = json.loads(run_input) if isinstance(run_input, str) else run_input\n\n # retrieve if nested, just in case\n input_dict = input_dict.get(\"run_input\", input_dict)\n\n res = parent._run_actor(actor_id, input_dict)\n return \"\\n\\n\".join([ApifyActorsComponent.dict_to_json_str(item) for item in res])\n\n return ApifyActorRun\n\n @staticmethod\n def create_input_model_class(description: str) -> type[BaseModel]:\n \"\"\"Create a Pydantic model class for the Actor input.\"\"\"\n\n class ActorInput(BaseModel):\n \"\"\"Input for the Apify Actor tool.\"\"\"\n\n run_input: str = Field(..., description=description)\n\n return ActorInput\n\n def _get_apify_client(self) -> ApifyClient:\n \"\"\"Get the Apify client.\n\n Is created if not exists or token changes.\n \"\"\"\n if not self.apify_token:\n msg = \"API token is required.\"\n raise ValueError(msg)\n # when token changes, create a new client\n if self._apify_client is None or self._apify_client.token != self.apify_token:\n self._apify_client = ApifyClient(self.apify_token)\n if httpx_client := self._apify_client.http_client.httpx_client:\n httpx_client.headers[\"user-agent\"] += \"; Origin/langflow\"\n return self._apify_client\n\n def _get_actor_latest_build(self, actor_id: str) -> dict:\n \"\"\"Get the latest build of an Actor from the default build tag.\"\"\"\n client = self._get_apify_client()\n actor = client.actor(actor_id=actor_id)\n if not (actor_info := actor.get()):\n msg = f\"Actor {actor_id} not found.\"\n raise ValueError(msg)\n\n default_build_tag = actor_info.get(\"defaultRunOptions\", {}).get(\"build\")\n latest_build_id = actor_info.get(\"taggedBuilds\", {}).get(default_build_tag, {}).get(\"buildId\")\n\n if (build := client.build(latest_build_id).get()) is None:\n msg = f\"Build {latest_build_id} not found.\"\n raise ValueError(msg)\n\n return build\n\n @staticmethod\n def get_actor_input_schema_from_build(input_schema: dict) -> tuple[dict, list[str]]:\n \"\"\"Get the input schema from the Actor build.\n\n Trim the description to 250 characters.\n \"\"\"\n properties = input_schema.get(\"properties\", {})\n required = input_schema.get(\"required\", [])\n\n properties_out: dict = {}\n for item, meta in properties.items():\n properties_out[item] = {}\n if desc := meta.get(\"description\"):\n properties_out[item][\"description\"] = (\n desc[:MAX_DESCRIPTION_LEN] + \"...\" if len(desc) > MAX_DESCRIPTION_LEN else desc\n )\n for key_name in (\"type\", \"default\", \"prefill\", \"enum\"):\n if value := meta.get(key_name):\n properties_out[item][key_name] = value\n\n return properties_out, required\n\n def _get_run_dataset_id(self, run_id: str) -> str:\n \"\"\"Get the dataset id from the run id.\"\"\"\n client = self._get_apify_client()\n run = client.run(run_id=run_id)\n if (dataset := run.dataset().get()) is None:\n msg = \"Dataset not found\"\n raise ValueError(msg)\n if (did := dataset.get(\"id\")) is None:\n msg = \"Dataset id not found\"\n raise ValueError(msg)\n return did\n\n @staticmethod\n def dict_to_json_str(d: dict) -> str:\n \"\"\"Convert a dictionary to a JSON string.\"\"\"\n return json.dumps(d, separators=(\",\", \":\"), default=lambda _: \"\")\n\n @staticmethod\n 
def actor_id_to_tool_name(actor_id: str) -> str:\n \"\"\"Turn actor_id into a valid tool name.\n\n Tool name must only contain letters, numbers, underscores, dashes,\n and cannot contain spaces.\n \"\"\"\n valid_chars = string.ascii_letters + string.digits + \"_-\"\n return \"\".join(char if char in valid_chars else \"_\" for char in actor_id)\n\n def _run_actor(self, actor_id: str, run_input: dict, fields: list[str] | None = None) -> list[dict]:\n \"\"\"Run an Apify Actor and return the output dataset.\n\n Args:\n actor_id: Actor name from Apify store to run.\n run_input: JSON input for the Actor.\n fields: List of fields to extract from the dataset. Other fields will be ignored.\n \"\"\"\n client = self._get_apify_client()\n if (details := client.actor(actor_id=actor_id).call(run_input=run_input, wait_secs=1)) is None:\n msg = \"Actor run details not found\"\n raise ValueError(msg)\n if (run_id := details.get(\"id\")) is None:\n msg = \"Run id not found\"\n raise ValueError(msg)\n\n if (run_client := client.run(run_id)) is None:\n msg = \"Run client not found\"\n raise ValueError(msg)\n\n # stream logs\n with run_client.log().stream() as response:\n if response:\n for line in response.iter_lines():\n self.log(line)\n run_client.wait_for_finish()\n\n dataset_id = self._get_run_dataset_id(run_id)\n\n loader = ApifyDatasetLoader(\n dataset_id=dataset_id,\n dataset_mapping_function=lambda item: item\n if not fields\n else {k.replace(\".\", \"_\"): ApifyActorsComponent.get_nested_value(item, k) for k in fields},\n )\n return loader.load()\n\n @staticmethod\n def get_nested_value(data: dict[str, Any], key: str) -> Any:\n \"\"\"Get a nested value from a dictionary.\"\"\"\n keys = key.split(\".\")\n value = data\n for k in keys:\n if not isinstance(value, dict) or k not in value:\n return None\n value = value[k]\n return value\n\n @staticmethod\n def parse_dataset_fields(dataset_fields: str) -> list[str]:\n \"\"\"Convert a string of comma-separated fields into a list of fields.\"\"\"\n dataset_fields = dataset_fields.replace(\"'\", \"\").replace('\"', \"\").replace(\"`\", \"\")\n return [field.strip() for field in dataset_fields.split(\",\")]\n\n @staticmethod\n def flatten(d: dict) -> dict:\n \"\"\"Flatten a nested dictionary.\"\"\"\n\n def items():\n for key, value in d.items():\n if isinstance(value, dict):\n for subkey, subvalue in ApifyActorsComponent.flatten(value).items():\n yield key + \"_\" + subkey, subvalue\n else:\n yield key, value\n\n return dict(items())\n"
+ "value": "import json\nimport string\nfrom typing import Any, cast\n\nfrom apify_client import ApifyClient\nfrom langchain_community.document_loaders.apify_dataset import ApifyDatasetLoader\nfrom langchain_core.tools import BaseTool\nfrom pydantic import BaseModel, Field, field_serializer\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.field_typing import Tool\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import MultilineInput, Output, SecretStrInput, StrInput\nfrom langflow.schema.data import Data\n\nMAX_DESCRIPTION_LEN = 250\n\n\nclass ApifyActorsComponent(Component):\n display_name = \"Apify Actors\"\n description = (\n \"Use Apify Actors to extract data from hundreds of places fast. \"\n \"This component can be used in a flow to retrieve data or as a tool with an agent.\"\n )\n documentation: str = \"http://docs.langflow.org/integrations-apify\"\n icon = \"Apify\"\n name = \"ApifyActors\"\n\n inputs = [\n SecretStrInput(\n name=\"apify_token\",\n display_name=\"Apify Token\",\n info=\"The API token for the Apify account.\",\n required=True,\n password=True,\n ),\n StrInput(\n name=\"actor_id\",\n display_name=\"Actor\",\n info=(\n \"Actor name from Apify store to run. For example 'apify/website-content-crawler' \"\n \"to use the Website Content Crawler Actor.\"\n ),\n value=\"apify/website-content-crawler\",\n required=True,\n ),\n # multiline input is more pleasant to use than the nested dict input\n MultilineInput(\n name=\"run_input\",\n display_name=\"Run input\",\n info=(\n 'The JSON input for the Actor run. For example for the \"apify/website-content-crawler\" Actor: '\n '{\"startUrls\":[{\"url\":\"https://docs.apify.com/academy/web-scraping-for-beginners\"}],\"maxCrawlDepth\":0}'\n ),\n value='{\"startUrls\":[{\"url\":\"https://docs.apify.com/academy/web-scraping-for-beginners\"}],\"maxCrawlDepth\":0}',\n required=True,\n ),\n MultilineInput(\n name=\"dataset_fields\",\n display_name=\"Output fields\",\n info=(\n \"Fields to extract from the dataset, split by commas. \"\n \"Other fields will be ignored. Dots in nested structures will be replaced by underscores. \"\n \"Sample input: 'text, metadata.title'. \"\n \"Sample output: {'text': 'page content here', 'metadata_title': 'page title here'}. \"\n \"For example, for the 'apify/website-content-crawler' Actor, you can extract the 'markdown' field, \"\n \"which is the content of the website in markdown format.\"\n ),\n ),\n BoolInput(\n name=\"flatten_dataset\",\n display_name=\"Flatten output\",\n info=(\n \"The output dataset will be converted from a nested format to a flat structure. \"\n \"Dots in nested structure will be replaced by underscores. \"\n \"This is useful for further processing of the Data object. 
\"\n \"For example, {'a': {'b': 1}} will be flattened to {'a_b': 1}.\"\n ),\n ),\n ]\n\n outputs = [\n Output(display_name=\"Output\", name=\"output\", type_=list[Data], method=\"run_model\"),\n Output(display_name=\"Tool\", name=\"tool\", type_=Tool, method=\"build_tool\"),\n ]\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self._apify_client: ApifyClient | None = None\n\n def run_model(self) -> list[Data]:\n \"\"\"Run the Actor and return node output.\"\"\"\n input_ = json.loads(self.run_input)\n fields = ApifyActorsComponent.parse_dataset_fields(self.dataset_fields) if self.dataset_fields else None\n res = self._run_actor(self.actor_id, input_, fields=fields)\n if self.flatten_dataset:\n res = [ApifyActorsComponent.flatten(item) for item in res]\n data = [Data(data=item) for item in res]\n\n self.status = data\n return data\n\n def build_tool(self) -> Tool:\n \"\"\"Build a tool for an agent that runs the Apify Actor.\"\"\"\n actor_id = self.actor_id\n\n build = self._get_actor_latest_build(actor_id)\n readme = build.get(\"readme\", \"\")[:250] + \"...\"\n if not (input_schema_str := build.get(\"inputSchema\")):\n msg = \"Input schema not found\"\n raise ValueError(msg)\n input_schema = json.loads(input_schema_str)\n properties, required = ApifyActorsComponent.get_actor_input_schema_from_build(input_schema)\n properties = {\"run_input\": properties}\n\n # works from input schema\n info_ = [\n (\n \"JSON encoded as a string with input schema (STRICTLY FOLLOW JSON FORMAT AND SCHEMA):\\n\\n\"\n f\"{json.dumps(properties, separators=(',', ':'))}\"\n )\n ]\n if required:\n info_.append(\"\\n\\nRequired fields:\\n\" + \"\\n\".join(required))\n\n info = \"\".join(info_)\n\n input_model_cls = ApifyActorsComponent.create_input_model_class(info)\n tool_cls = ApifyActorsComponent.create_tool_class(self, readme, input_model_cls, actor_id)\n\n return cast(\"Tool\", tool_cls())\n\n @staticmethod\n def create_tool_class(\n parent: \"ApifyActorsComponent\", readme: str, input_model: type[BaseModel], actor_id: str\n ) -> type[BaseTool]:\n \"\"\"Create a tool class that runs an Apify Actor.\"\"\"\n\n class ApifyActorRun(BaseTool):\n \"\"\"Tool that runs Apify Actors.\"\"\"\n\n name: str = f\"apify_actor_{ApifyActorsComponent.actor_id_to_tool_name(actor_id)}\"\n description: str = (\n \"Run an Apify Actor with the given input. 
\"\n \"Here is a part of the currently loaded Actor README:\\n\\n\"\n f\"{readme}\\n\\n\"\n )\n\n args_schema: type[BaseModel] = input_model\n\n @field_serializer(\"args_schema\")\n def serialize_args_schema(self, args_schema):\n return args_schema.schema()\n\n def _run(self, run_input: str | dict) -> str:\n \"\"\"Use the Apify Actor.\"\"\"\n input_dict = json.loads(run_input) if isinstance(run_input, str) else run_input\n\n # retrieve if nested, just in case\n input_dict = input_dict.get(\"run_input\", input_dict)\n\n res = parent._run_actor(actor_id, input_dict)\n return \"\\n\\n\".join([ApifyActorsComponent.dict_to_json_str(item) for item in res])\n\n return ApifyActorRun\n\n @staticmethod\n def create_input_model_class(description: str) -> type[BaseModel]:\n \"\"\"Create a Pydantic model class for the Actor input.\"\"\"\n\n class ActorInput(BaseModel):\n \"\"\"Input for the Apify Actor tool.\"\"\"\n\n run_input: str = Field(..., description=description)\n\n return ActorInput\n\n def _get_apify_client(self) -> ApifyClient:\n \"\"\"Get the Apify client.\n\n Is created if not exists or token changes.\n \"\"\"\n if not self.apify_token:\n msg = \"API token is required.\"\n raise ValueError(msg)\n # when token changes, create a new client\n if self._apify_client is None or self._apify_client.token != self.apify_token:\n self._apify_client = ApifyClient(self.apify_token)\n if httpx_client := self._apify_client.http_client.httpx_client:\n httpx_client.headers[\"user-agent\"] += \"; Origin/langflow\"\n return self._apify_client\n\n def _get_actor_latest_build(self, actor_id: str) -> dict:\n \"\"\"Get the latest build of an Actor from the default build tag.\"\"\"\n client = self._get_apify_client()\n actor = client.actor(actor_id=actor_id)\n if not (actor_info := actor.get()):\n msg = f\"Actor {actor_id} not found.\"\n raise ValueError(msg)\n\n default_build_tag = actor_info.get(\"defaultRunOptions\", {}).get(\"build\")\n latest_build_id = actor_info.get(\"taggedBuilds\", {}).get(default_build_tag, {}).get(\"buildId\")\n\n if (build := client.build(latest_build_id).get()) is None:\n msg = f\"Build {latest_build_id} not found.\"\n raise ValueError(msg)\n\n return build\n\n @staticmethod\n def get_actor_input_schema_from_build(input_schema: dict) -> tuple[dict, list[str]]:\n \"\"\"Get the input schema from the Actor build.\n\n Trim the description to 250 characters.\n \"\"\"\n properties = input_schema.get(\"properties\", {})\n required = input_schema.get(\"required\", [])\n\n properties_out: dict = {}\n for item, meta in properties.items():\n properties_out[item] = {}\n if desc := meta.get(\"description\"):\n properties_out[item][\"description\"] = (\n desc[:MAX_DESCRIPTION_LEN] + \"...\" if len(desc) > MAX_DESCRIPTION_LEN else desc\n )\n for key_name in (\"type\", \"default\", \"prefill\", \"enum\"):\n if value := meta.get(key_name):\n properties_out[item][key_name] = value\n\n return properties_out, required\n\n def _get_run_dataset_id(self, run_id: str) -> str:\n \"\"\"Get the dataset id from the run id.\"\"\"\n client = self._get_apify_client()\n run = client.run(run_id=run_id)\n if (dataset := run.dataset().get()) is None:\n msg = \"Dataset not found\"\n raise ValueError(msg)\n if (did := dataset.get(\"id\")) is None:\n msg = \"Dataset id not found\"\n raise ValueError(msg)\n return did\n\n @staticmethod\n def dict_to_json_str(d: dict) -> str:\n \"\"\"Convert a dictionary to a JSON string.\"\"\"\n return json.dumps(d, separators=(\",\", \":\"), default=lambda _: \"\")\n\n @staticmethod\n 
def actor_id_to_tool_name(actor_id: str) -> str:\n \"\"\"Turn actor_id into a valid tool name.\n\n Tool name must only contain letters, numbers, underscores, dashes,\n and cannot contain spaces.\n \"\"\"\n valid_chars = string.ascii_letters + string.digits + \"_-\"\n return \"\".join(char if char in valid_chars else \"_\" for char in actor_id)\n\n def _run_actor(self, actor_id: str, run_input: dict, fields: list[str] | None = None) -> list[dict]:\n \"\"\"Run an Apify Actor and return the output dataset.\n\n Args:\n actor_id: Actor name from Apify store to run.\n run_input: JSON input for the Actor.\n fields: List of fields to extract from the dataset. Other fields will be ignored.\n \"\"\"\n client = self._get_apify_client()\n if (details := client.actor(actor_id=actor_id).call(run_input=run_input, wait_secs=1)) is None:\n msg = \"Actor run details not found\"\n raise ValueError(msg)\n if (run_id := details.get(\"id\")) is None:\n msg = \"Run id not found\"\n raise ValueError(msg)\n\n if (run_client := client.run(run_id)) is None:\n msg = \"Run client not found\"\n raise ValueError(msg)\n\n # stream logs\n with run_client.log().stream() as response:\n if response:\n for line in response.iter_lines():\n self.log(line)\n run_client.wait_for_finish()\n\n dataset_id = self._get_run_dataset_id(run_id)\n\n loader = ApifyDatasetLoader(\n dataset_id=dataset_id,\n dataset_mapping_function=lambda item: item\n if not fields\n else {k.replace(\".\", \"_\"): ApifyActorsComponent.get_nested_value(item, k) for k in fields},\n )\n return loader.load()\n\n @staticmethod\n def get_nested_value(data: dict[str, Any], key: str) -> Any:\n \"\"\"Get a nested value from a dictionary.\"\"\"\n keys = key.split(\".\")\n value = data\n for k in keys:\n if not isinstance(value, dict) or k not in value:\n return None\n value = value[k]\n return value\n\n @staticmethod\n def parse_dataset_fields(dataset_fields: str) -> list[str]:\n \"\"\"Convert a string of comma-separated fields into a list of fields.\"\"\"\n dataset_fields = dataset_fields.replace(\"'\", \"\").replace('\"', \"\").replace(\"`\", \"\")\n return [field.strip() for field in dataset_fields.split(\",\")]\n\n @staticmethod\n def flatten(d: dict) -> dict:\n \"\"\"Flatten a nested dictionary.\"\"\"\n\n def items():\n for key, value in d.items():\n if isinstance(value, dict):\n for subkey, subvalue in ApifyActorsComponent.flatten(value).items():\n yield key + \"_\" + subkey, subvalue\n else:\n yield key, value\n\n return dict(items())\n"
},
"dataset_fields": {
"_input_type": "MultilineInput",
@@ -434,7 +434,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "import json\nimport string\nfrom typing import Any, cast\n\nfrom apify_client import ApifyClient\nfrom langchain_community.document_loaders.apify_dataset import ApifyDatasetLoader\nfrom langchain_core.tools import BaseTool\nfrom pydantic import BaseModel, Field, field_serializer\n\nfrom langflow.custom import Component\nfrom langflow.field_typing import Tool\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import MultilineInput, Output, SecretStrInput, StrInput\nfrom langflow.schema import Data\n\nMAX_DESCRIPTION_LEN = 250\n\n\nclass ApifyActorsComponent(Component):\n display_name = \"Apify Actors\"\n description = (\n \"Use Apify Actors to extract data from hundreds of places fast. \"\n \"This component can be used in a flow to retrieve data or as a tool with an agent.\"\n )\n documentation: str = \"http://docs.langflow.org/integrations-apify\"\n icon = \"Apify\"\n name = \"ApifyActors\"\n\n inputs = [\n SecretStrInput(\n name=\"apify_token\",\n display_name=\"Apify Token\",\n info=\"The API token for the Apify account.\",\n required=True,\n password=True,\n ),\n StrInput(\n name=\"actor_id\",\n display_name=\"Actor\",\n info=(\n \"Actor name from Apify store to run. For example 'apify/website-content-crawler' \"\n \"to use the Website Content Crawler Actor.\"\n ),\n value=\"apify/website-content-crawler\",\n required=True,\n ),\n # multiline input is more pleasant to use than the nested dict input\n MultilineInput(\n name=\"run_input\",\n display_name=\"Run input\",\n info=(\n 'The JSON input for the Actor run. For example for the \"apify/website-content-crawler\" Actor: '\n '{\"startUrls\":[{\"url\":\"https://docs.apify.com/academy/web-scraping-for-beginners\"}],\"maxCrawlDepth\":0}'\n ),\n value='{\"startUrls\":[{\"url\":\"https://docs.apify.com/academy/web-scraping-for-beginners\"}],\"maxCrawlDepth\":0}',\n required=True,\n ),\n MultilineInput(\n name=\"dataset_fields\",\n display_name=\"Output fields\",\n info=(\n \"Fields to extract from the dataset, split by commas. \"\n \"Other fields will be ignored. Dots in nested structures will be replaced by underscores. \"\n \"Sample input: 'text, metadata.title'. \"\n \"Sample output: {'text': 'page content here', 'metadata_title': 'page title here'}. \"\n \"For example, for the 'apify/website-content-crawler' Actor, you can extract the 'markdown' field, \"\n \"which is the content of the website in markdown format.\"\n ),\n ),\n BoolInput(\n name=\"flatten_dataset\",\n display_name=\"Flatten output\",\n info=(\n \"The output dataset will be converted from a nested format to a flat structure. \"\n \"Dots in nested structure will be replaced by underscores. \"\n \"This is useful for further processing of the Data object. 
\"\n \"For example, {'a': {'b': 1}} will be flattened to {'a_b': 1}.\"\n ),\n ),\n ]\n\n outputs = [\n Output(display_name=\"Output\", name=\"output\", type_=list[Data], method=\"run_model\"),\n Output(display_name=\"Tool\", name=\"tool\", type_=Tool, method=\"build_tool\"),\n ]\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self._apify_client: ApifyClient | None = None\n\n def run_model(self) -> list[Data]:\n \"\"\"Run the Actor and return node output.\"\"\"\n input_ = json.loads(self.run_input)\n fields = ApifyActorsComponent.parse_dataset_fields(self.dataset_fields) if self.dataset_fields else None\n res = self._run_actor(self.actor_id, input_, fields=fields)\n if self.flatten_dataset:\n res = [ApifyActorsComponent.flatten(item) for item in res]\n data = [Data(data=item) for item in res]\n\n self.status = data\n return data\n\n def build_tool(self) -> Tool:\n \"\"\"Build a tool for an agent that runs the Apify Actor.\"\"\"\n actor_id = self.actor_id\n\n build = self._get_actor_latest_build(actor_id)\n readme = build.get(\"readme\", \"\")[:250] + \"...\"\n if not (input_schema_str := build.get(\"inputSchema\")):\n msg = \"Input schema not found\"\n raise ValueError(msg)\n input_schema = json.loads(input_schema_str)\n properties, required = ApifyActorsComponent.get_actor_input_schema_from_build(input_schema)\n properties = {\"run_input\": properties}\n\n # works from input schema\n info_ = [\n (\n \"JSON encoded as a string with input schema (STRICTLY FOLLOW JSON FORMAT AND SCHEMA):\\n\\n\"\n f\"{json.dumps(properties, separators=(',', ':'))}\"\n )\n ]\n if required:\n info_.append(\"\\n\\nRequired fields:\\n\" + \"\\n\".join(required))\n\n info = \"\".join(info_)\n\n input_model_cls = ApifyActorsComponent.create_input_model_class(info)\n tool_cls = ApifyActorsComponent.create_tool_class(self, readme, input_model_cls, actor_id)\n\n return cast(\"Tool\", tool_cls())\n\n @staticmethod\n def create_tool_class(\n parent: \"ApifyActorsComponent\", readme: str, input_model: type[BaseModel], actor_id: str\n ) -> type[BaseTool]:\n \"\"\"Create a tool class that runs an Apify Actor.\"\"\"\n\n class ApifyActorRun(BaseTool):\n \"\"\"Tool that runs Apify Actors.\"\"\"\n\n name: str = f\"apify_actor_{ApifyActorsComponent.actor_id_to_tool_name(actor_id)}\"\n description: str = (\n \"Run an Apify Actor with the given input. 
\"\n \"Here is a part of the currently loaded Actor README:\\n\\n\"\n f\"{readme}\\n\\n\"\n )\n\n args_schema: type[BaseModel] = input_model\n\n @field_serializer(\"args_schema\")\n def serialize_args_schema(self, args_schema):\n return args_schema.schema()\n\n def _run(self, run_input: str | dict) -> str:\n \"\"\"Use the Apify Actor.\"\"\"\n input_dict = json.loads(run_input) if isinstance(run_input, str) else run_input\n\n # retrieve if nested, just in case\n input_dict = input_dict.get(\"run_input\", input_dict)\n\n res = parent._run_actor(actor_id, input_dict)\n return \"\\n\\n\".join([ApifyActorsComponent.dict_to_json_str(item) for item in res])\n\n return ApifyActorRun\n\n @staticmethod\n def create_input_model_class(description: str) -> type[BaseModel]:\n \"\"\"Create a Pydantic model class for the Actor input.\"\"\"\n\n class ActorInput(BaseModel):\n \"\"\"Input for the Apify Actor tool.\"\"\"\n\n run_input: str = Field(..., description=description)\n\n return ActorInput\n\n def _get_apify_client(self) -> ApifyClient:\n \"\"\"Get the Apify client.\n\n Is created if not exists or token changes.\n \"\"\"\n if not self.apify_token:\n msg = \"API token is required.\"\n raise ValueError(msg)\n # when token changes, create a new client\n if self._apify_client is None or self._apify_client.token != self.apify_token:\n self._apify_client = ApifyClient(self.apify_token)\n if httpx_client := self._apify_client.http_client.httpx_client:\n httpx_client.headers[\"user-agent\"] += \"; Origin/langflow\"\n return self._apify_client\n\n def _get_actor_latest_build(self, actor_id: str) -> dict:\n \"\"\"Get the latest build of an Actor from the default build tag.\"\"\"\n client = self._get_apify_client()\n actor = client.actor(actor_id=actor_id)\n if not (actor_info := actor.get()):\n msg = f\"Actor {actor_id} not found.\"\n raise ValueError(msg)\n\n default_build_tag = actor_info.get(\"defaultRunOptions\", {}).get(\"build\")\n latest_build_id = actor_info.get(\"taggedBuilds\", {}).get(default_build_tag, {}).get(\"buildId\")\n\n if (build := client.build(latest_build_id).get()) is None:\n msg = f\"Build {latest_build_id} not found.\"\n raise ValueError(msg)\n\n return build\n\n @staticmethod\n def get_actor_input_schema_from_build(input_schema: dict) -> tuple[dict, list[str]]:\n \"\"\"Get the input schema from the Actor build.\n\n Trim the description to 250 characters.\n \"\"\"\n properties = input_schema.get(\"properties\", {})\n required = input_schema.get(\"required\", [])\n\n properties_out: dict = {}\n for item, meta in properties.items():\n properties_out[item] = {}\n if desc := meta.get(\"description\"):\n properties_out[item][\"description\"] = (\n desc[:MAX_DESCRIPTION_LEN] + \"...\" if len(desc) > MAX_DESCRIPTION_LEN else desc\n )\n for key_name in (\"type\", \"default\", \"prefill\", \"enum\"):\n if value := meta.get(key_name):\n properties_out[item][key_name] = value\n\n return properties_out, required\n\n def _get_run_dataset_id(self, run_id: str) -> str:\n \"\"\"Get the dataset id from the run id.\"\"\"\n client = self._get_apify_client()\n run = client.run(run_id=run_id)\n if (dataset := run.dataset().get()) is None:\n msg = \"Dataset not found\"\n raise ValueError(msg)\n if (did := dataset.get(\"id\")) is None:\n msg = \"Dataset id not found\"\n raise ValueError(msg)\n return did\n\n @staticmethod\n def dict_to_json_str(d: dict) -> str:\n \"\"\"Convert a dictionary to a JSON string.\"\"\"\n return json.dumps(d, separators=(\",\", \":\"), default=lambda _: \"\")\n\n @staticmethod\n 
def actor_id_to_tool_name(actor_id: str) -> str:\n \"\"\"Turn actor_id into a valid tool name.\n\n Tool name must only contain letters, numbers, underscores, dashes,\n and cannot contain spaces.\n \"\"\"\n valid_chars = string.ascii_letters + string.digits + \"_-\"\n return \"\".join(char if char in valid_chars else \"_\" for char in actor_id)\n\n def _run_actor(self, actor_id: str, run_input: dict, fields: list[str] | None = None) -> list[dict]:\n \"\"\"Run an Apify Actor and return the output dataset.\n\n Args:\n actor_id: Actor name from Apify store to run.\n run_input: JSON input for the Actor.\n fields: List of fields to extract from the dataset. Other fields will be ignored.\n \"\"\"\n client = self._get_apify_client()\n if (details := client.actor(actor_id=actor_id).call(run_input=run_input, wait_secs=1)) is None:\n msg = \"Actor run details not found\"\n raise ValueError(msg)\n if (run_id := details.get(\"id\")) is None:\n msg = \"Run id not found\"\n raise ValueError(msg)\n\n if (run_client := client.run(run_id)) is None:\n msg = \"Run client not found\"\n raise ValueError(msg)\n\n # stream logs\n with run_client.log().stream() as response:\n if response:\n for line in response.iter_lines():\n self.log(line)\n run_client.wait_for_finish()\n\n dataset_id = self._get_run_dataset_id(run_id)\n\n loader = ApifyDatasetLoader(\n dataset_id=dataset_id,\n dataset_mapping_function=lambda item: item\n if not fields\n else {k.replace(\".\", \"_\"): ApifyActorsComponent.get_nested_value(item, k) for k in fields},\n )\n return loader.load()\n\n @staticmethod\n def get_nested_value(data: dict[str, Any], key: str) -> Any:\n \"\"\"Get a nested value from a dictionary.\"\"\"\n keys = key.split(\".\")\n value = data\n for k in keys:\n if not isinstance(value, dict) or k not in value:\n return None\n value = value[k]\n return value\n\n @staticmethod\n def parse_dataset_fields(dataset_fields: str) -> list[str]:\n \"\"\"Convert a string of comma-separated fields into a list of fields.\"\"\"\n dataset_fields = dataset_fields.replace(\"'\", \"\").replace('\"', \"\").replace(\"`\", \"\")\n return [field.strip() for field in dataset_fields.split(\",\")]\n\n @staticmethod\n def flatten(d: dict) -> dict:\n \"\"\"Flatten a nested dictionary.\"\"\"\n\n def items():\n for key, value in d.items():\n if isinstance(value, dict):\n for subkey, subvalue in ApifyActorsComponent.flatten(value).items():\n yield key + \"_\" + subkey, subvalue\n else:\n yield key, value\n\n return dict(items())\n"
+ "value": "import json\nimport string\nfrom typing import Any, cast\n\nfrom apify_client import ApifyClient\nfrom langchain_community.document_loaders.apify_dataset import ApifyDatasetLoader\nfrom langchain_core.tools import BaseTool\nfrom pydantic import BaseModel, Field, field_serializer\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.field_typing import Tool\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import MultilineInput, Output, SecretStrInput, StrInput\nfrom langflow.schema.data import Data\n\nMAX_DESCRIPTION_LEN = 250\n\n\nclass ApifyActorsComponent(Component):\n display_name = \"Apify Actors\"\n description = (\n \"Use Apify Actors to extract data from hundreds of places fast. \"\n \"This component can be used in a flow to retrieve data or as a tool with an agent.\"\n )\n documentation: str = \"http://docs.langflow.org/integrations-apify\"\n icon = \"Apify\"\n name = \"ApifyActors\"\n\n inputs = [\n SecretStrInput(\n name=\"apify_token\",\n display_name=\"Apify Token\",\n info=\"The API token for the Apify account.\",\n required=True,\n password=True,\n ),\n StrInput(\n name=\"actor_id\",\n display_name=\"Actor\",\n info=(\n \"Actor name from Apify store to run. For example 'apify/website-content-crawler' \"\n \"to use the Website Content Crawler Actor.\"\n ),\n value=\"apify/website-content-crawler\",\n required=True,\n ),\n # multiline input is more pleasant to use than the nested dict input\n MultilineInput(\n name=\"run_input\",\n display_name=\"Run input\",\n info=(\n 'The JSON input for the Actor run. For example for the \"apify/website-content-crawler\" Actor: '\n '{\"startUrls\":[{\"url\":\"https://docs.apify.com/academy/web-scraping-for-beginners\"}],\"maxCrawlDepth\":0}'\n ),\n value='{\"startUrls\":[{\"url\":\"https://docs.apify.com/academy/web-scraping-for-beginners\"}],\"maxCrawlDepth\":0}',\n required=True,\n ),\n MultilineInput(\n name=\"dataset_fields\",\n display_name=\"Output fields\",\n info=(\n \"Fields to extract from the dataset, split by commas. \"\n \"Other fields will be ignored. Dots in nested structures will be replaced by underscores. \"\n \"Sample input: 'text, metadata.title'. \"\n \"Sample output: {'text': 'page content here', 'metadata_title': 'page title here'}. \"\n \"For example, for the 'apify/website-content-crawler' Actor, you can extract the 'markdown' field, \"\n \"which is the content of the website in markdown format.\"\n ),\n ),\n BoolInput(\n name=\"flatten_dataset\",\n display_name=\"Flatten output\",\n info=(\n \"The output dataset will be converted from a nested format to a flat structure. \"\n \"Dots in nested structure will be replaced by underscores. \"\n \"This is useful for further processing of the Data object. 
\"\n \"For example, {'a': {'b': 1}} will be flattened to {'a_b': 1}.\"\n ),\n ),\n ]\n\n outputs = [\n Output(display_name=\"Output\", name=\"output\", type_=list[Data], method=\"run_model\"),\n Output(display_name=\"Tool\", name=\"tool\", type_=Tool, method=\"build_tool\"),\n ]\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self._apify_client: ApifyClient | None = None\n\n def run_model(self) -> list[Data]:\n \"\"\"Run the Actor and return node output.\"\"\"\n input_ = json.loads(self.run_input)\n fields = ApifyActorsComponent.parse_dataset_fields(self.dataset_fields) if self.dataset_fields else None\n res = self._run_actor(self.actor_id, input_, fields=fields)\n if self.flatten_dataset:\n res = [ApifyActorsComponent.flatten(item) for item in res]\n data = [Data(data=item) for item in res]\n\n self.status = data\n return data\n\n def build_tool(self) -> Tool:\n \"\"\"Build a tool for an agent that runs the Apify Actor.\"\"\"\n actor_id = self.actor_id\n\n build = self._get_actor_latest_build(actor_id)\n readme = build.get(\"readme\", \"\")[:250] + \"...\"\n if not (input_schema_str := build.get(\"inputSchema\")):\n msg = \"Input schema not found\"\n raise ValueError(msg)\n input_schema = json.loads(input_schema_str)\n properties, required = ApifyActorsComponent.get_actor_input_schema_from_build(input_schema)\n properties = {\"run_input\": properties}\n\n # works from input schema\n info_ = [\n (\n \"JSON encoded as a string with input schema (STRICTLY FOLLOW JSON FORMAT AND SCHEMA):\\n\\n\"\n f\"{json.dumps(properties, separators=(',', ':'))}\"\n )\n ]\n if required:\n info_.append(\"\\n\\nRequired fields:\\n\" + \"\\n\".join(required))\n\n info = \"\".join(info_)\n\n input_model_cls = ApifyActorsComponent.create_input_model_class(info)\n tool_cls = ApifyActorsComponent.create_tool_class(self, readme, input_model_cls, actor_id)\n\n return cast(\"Tool\", tool_cls())\n\n @staticmethod\n def create_tool_class(\n parent: \"ApifyActorsComponent\", readme: str, input_model: type[BaseModel], actor_id: str\n ) -> type[BaseTool]:\n \"\"\"Create a tool class that runs an Apify Actor.\"\"\"\n\n class ApifyActorRun(BaseTool):\n \"\"\"Tool that runs Apify Actors.\"\"\"\n\n name: str = f\"apify_actor_{ApifyActorsComponent.actor_id_to_tool_name(actor_id)}\"\n description: str = (\n \"Run an Apify Actor with the given input. 
\"\n \"Here is a part of the currently loaded Actor README:\\n\\n\"\n f\"{readme}\\n\\n\"\n )\n\n args_schema: type[BaseModel] = input_model\n\n @field_serializer(\"args_schema\")\n def serialize_args_schema(self, args_schema):\n return args_schema.schema()\n\n def _run(self, run_input: str | dict) -> str:\n \"\"\"Use the Apify Actor.\"\"\"\n input_dict = json.loads(run_input) if isinstance(run_input, str) else run_input\n\n # retrieve if nested, just in case\n input_dict = input_dict.get(\"run_input\", input_dict)\n\n res = parent._run_actor(actor_id, input_dict)\n return \"\\n\\n\".join([ApifyActorsComponent.dict_to_json_str(item) for item in res])\n\n return ApifyActorRun\n\n @staticmethod\n def create_input_model_class(description: str) -> type[BaseModel]:\n \"\"\"Create a Pydantic model class for the Actor input.\"\"\"\n\n class ActorInput(BaseModel):\n \"\"\"Input for the Apify Actor tool.\"\"\"\n\n run_input: str = Field(..., description=description)\n\n return ActorInput\n\n def _get_apify_client(self) -> ApifyClient:\n \"\"\"Get the Apify client.\n\n Is created if not exists or token changes.\n \"\"\"\n if not self.apify_token:\n msg = \"API token is required.\"\n raise ValueError(msg)\n # when token changes, create a new client\n if self._apify_client is None or self._apify_client.token != self.apify_token:\n self._apify_client = ApifyClient(self.apify_token)\n if httpx_client := self._apify_client.http_client.httpx_client:\n httpx_client.headers[\"user-agent\"] += \"; Origin/langflow\"\n return self._apify_client\n\n def _get_actor_latest_build(self, actor_id: str) -> dict:\n \"\"\"Get the latest build of an Actor from the default build tag.\"\"\"\n client = self._get_apify_client()\n actor = client.actor(actor_id=actor_id)\n if not (actor_info := actor.get()):\n msg = f\"Actor {actor_id} not found.\"\n raise ValueError(msg)\n\n default_build_tag = actor_info.get(\"defaultRunOptions\", {}).get(\"build\")\n latest_build_id = actor_info.get(\"taggedBuilds\", {}).get(default_build_tag, {}).get(\"buildId\")\n\n if (build := client.build(latest_build_id).get()) is None:\n msg = f\"Build {latest_build_id} not found.\"\n raise ValueError(msg)\n\n return build\n\n @staticmethod\n def get_actor_input_schema_from_build(input_schema: dict) -> tuple[dict, list[str]]:\n \"\"\"Get the input schema from the Actor build.\n\n Trim the description to 250 characters.\n \"\"\"\n properties = input_schema.get(\"properties\", {})\n required = input_schema.get(\"required\", [])\n\n properties_out: dict = {}\n for item, meta in properties.items():\n properties_out[item] = {}\n if desc := meta.get(\"description\"):\n properties_out[item][\"description\"] = (\n desc[:MAX_DESCRIPTION_LEN] + \"...\" if len(desc) > MAX_DESCRIPTION_LEN else desc\n )\n for key_name in (\"type\", \"default\", \"prefill\", \"enum\"):\n if value := meta.get(key_name):\n properties_out[item][key_name] = value\n\n return properties_out, required\n\n def _get_run_dataset_id(self, run_id: str) -> str:\n \"\"\"Get the dataset id from the run id.\"\"\"\n client = self._get_apify_client()\n run = client.run(run_id=run_id)\n if (dataset := run.dataset().get()) is None:\n msg = \"Dataset not found\"\n raise ValueError(msg)\n if (did := dataset.get(\"id\")) is None:\n msg = \"Dataset id not found\"\n raise ValueError(msg)\n return did\n\n @staticmethod\n def dict_to_json_str(d: dict) -> str:\n \"\"\"Convert a dictionary to a JSON string.\"\"\"\n return json.dumps(d, separators=(\",\", \":\"), default=lambda _: \"\")\n\n @staticmethod\n 
def actor_id_to_tool_name(actor_id: str) -> str:\n \"\"\"Turn actor_id into a valid tool name.\n\n Tool name must only contain letters, numbers, underscores, dashes,\n and cannot contain spaces.\n \"\"\"\n valid_chars = string.ascii_letters + string.digits + \"_-\"\n return \"\".join(char if char in valid_chars else \"_\" for char in actor_id)\n\n def _run_actor(self, actor_id: str, run_input: dict, fields: list[str] | None = None) -> list[dict]:\n \"\"\"Run an Apify Actor and return the output dataset.\n\n Args:\n actor_id: Actor name from Apify store to run.\n run_input: JSON input for the Actor.\n fields: List of fields to extract from the dataset. Other fields will be ignored.\n \"\"\"\n client = self._get_apify_client()\n if (details := client.actor(actor_id=actor_id).call(run_input=run_input, wait_secs=1)) is None:\n msg = \"Actor run details not found\"\n raise ValueError(msg)\n if (run_id := details.get(\"id\")) is None:\n msg = \"Run id not found\"\n raise ValueError(msg)\n\n if (run_client := client.run(run_id)) is None:\n msg = \"Run client not found\"\n raise ValueError(msg)\n\n # stream logs\n with run_client.log().stream() as response:\n if response:\n for line in response.iter_lines():\n self.log(line)\n run_client.wait_for_finish()\n\n dataset_id = self._get_run_dataset_id(run_id)\n\n loader = ApifyDatasetLoader(\n dataset_id=dataset_id,\n dataset_mapping_function=lambda item: item\n if not fields\n else {k.replace(\".\", \"_\"): ApifyActorsComponent.get_nested_value(item, k) for k in fields},\n )\n return loader.load()\n\n @staticmethod\n def get_nested_value(data: dict[str, Any], key: str) -> Any:\n \"\"\"Get a nested value from a dictionary.\"\"\"\n keys = key.split(\".\")\n value = data\n for k in keys:\n if not isinstance(value, dict) or k not in value:\n return None\n value = value[k]\n return value\n\n @staticmethod\n def parse_dataset_fields(dataset_fields: str) -> list[str]:\n \"\"\"Convert a string of comma-separated fields into a list of fields.\"\"\"\n dataset_fields = dataset_fields.replace(\"'\", \"\").replace('\"', \"\").replace(\"`\", \"\")\n return [field.strip() for field in dataset_fields.split(\",\")]\n\n @staticmethod\n def flatten(d: dict) -> dict:\n \"\"\"Flatten a nested dictionary.\"\"\"\n\n def items():\n for key, value in d.items():\n if isinstance(value, dict):\n for subkey, subvalue in ApifyActorsComponent.flatten(value).items():\n yield key + \"_\" + subkey, subvalue\n else:\n yield key, value\n\n return dict(items())\n"
},
"dataset_fields": {
"_input_type": "MultilineInput",
@@ -718,7 +718,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
+ "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"_input_type": "FileInput",
@@ -1047,7 +1047,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
@@ -1454,7 +1454,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
+ "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers.current_date import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
},
"handle_parsing_errors": {
"_input_type": "BoolInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Text Sentiment Analysis.json b/src/backend/base/langflow/initial_setup/starter_projects/Text Sentiment Analysis.json
index f7e5d508e..c290ea4d3 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Text Sentiment Analysis.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Text Sentiment Analysis.json
@@ -297,7 +297,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data import BaseFileComponent\nfrom langflow.base.data.utils import TEXT_FILE_TYPES, parallel_load_data, parse_text_file_to_data\nfrom langflow.io import BoolInput, IntInput\nfrom langflow.schema import Data\n\n\nclass FileComponent(BaseFileComponent):\n \"\"\"Handles loading and processing of individual or zipped text files.\n\n This component supports processing multiple valid files within a zip archive,\n resolving paths, validating file types, and optionally using multithreading for processing.\n \"\"\"\n\n display_name = \"File\"\n description = \"Loads content from one or more files as a DataFrame.\"\n icon = \"file-text\"\n name = \"File\"\n\n VALID_EXTENSIONS = TEXT_FILE_TYPES\n\n inputs = [\n *BaseFileComponent._base_inputs,\n BoolInput(\n name=\"use_multithreading\",\n display_name=\"[Deprecated] Use Multithreading\",\n advanced=True,\n value=True,\n info=\"Set 'Processing Concurrency' greater than 1 to enable multithreading.\",\n ),\n IntInput(\n name=\"concurrency_multithreading\",\n display_name=\"Processing Concurrency\",\n advanced=True,\n info=\"When multiple files are being processed, the number of files to process concurrently.\",\n value=1,\n ),\n ]\n\n outputs = [\n *BaseFileComponent._base_outputs,\n ]\n\n def process_files(self, file_list: list[BaseFileComponent.BaseFile]) -> list[BaseFileComponent.BaseFile]:\n \"\"\"Processes files either sequentially or in parallel, depending on concurrency settings.\n\n Args:\n file_list (list[BaseFileComponent.BaseFile]): List of files to process.\n\n Returns:\n list[BaseFileComponent.BaseFile]: Updated list of files with merged data.\n \"\"\"\n\n def process_file(file_path: str, *, silent_errors: bool = False) -> Data | None:\n \"\"\"Processes a single file and returns its Data object.\"\"\"\n try:\n return parse_text_file_to_data(file_path, silent_errors=silent_errors)\n except FileNotFoundError as e:\n msg = f\"File not found: {file_path}. Error: {e}\"\n self.log(msg)\n if not silent_errors:\n raise\n return None\n except Exception as e:\n msg = f\"Unexpected error processing {file_path}: {e}\"\n self.log(msg)\n if not silent_errors:\n raise\n return None\n\n if not file_list:\n msg = \"No files to process.\"\n raise ValueError(msg)\n\n concurrency = 1 if not self.use_multithreading else max(1, self.concurrency_multithreading)\n file_count = len(file_list)\n\n parallel_processing_threshold = 2\n if concurrency < parallel_processing_threshold or file_count < parallel_processing_threshold:\n if file_count > 1:\n self.log(f\"Processing {file_count} files sequentially.\")\n processed_data = [process_file(str(file.path), silent_errors=self.silent_errors) for file in file_list]\n else:\n self.log(f\"Starting parallel processing of {file_count} files with concurrency: {concurrency}.\")\n file_paths = [str(file.path) for file in file_list]\n processed_data = parallel_load_data(\n file_paths,\n silent_errors=self.silent_errors,\n load_function=process_file,\n max_concurrency=concurrency,\n )\n\n # Use rollup_basefile_data to merge processed data with BaseFile objects\n return self.rollup_data(file_list, processed_data)\n"
+ "value": "from langflow.base.data.base_file import BaseFileComponent\nfrom langflow.base.data.utils import TEXT_FILE_TYPES, parallel_load_data, parse_text_file_to_data\nfrom langflow.io import BoolInput, IntInput\nfrom langflow.schema.data import Data\n\n\nclass FileComponent(BaseFileComponent):\n \"\"\"Handles loading and processing of individual or zipped text files.\n\n This component supports processing multiple valid files within a zip archive,\n resolving paths, validating file types, and optionally using multithreading for processing.\n \"\"\"\n\n display_name = \"File\"\n description = \"Loads content from one or more files as a DataFrame.\"\n icon = \"file-text\"\n name = \"File\"\n\n VALID_EXTENSIONS = TEXT_FILE_TYPES\n\n inputs = [\n *BaseFileComponent._base_inputs,\n BoolInput(\n name=\"use_multithreading\",\n display_name=\"[Deprecated] Use Multithreading\",\n advanced=True,\n value=True,\n info=\"Set 'Processing Concurrency' greater than 1 to enable multithreading.\",\n ),\n IntInput(\n name=\"concurrency_multithreading\",\n display_name=\"Processing Concurrency\",\n advanced=True,\n info=\"When multiple files are being processed, the number of files to process concurrently.\",\n value=1,\n ),\n ]\n\n outputs = [\n *BaseFileComponent._base_outputs,\n ]\n\n def process_files(self, file_list: list[BaseFileComponent.BaseFile]) -> list[BaseFileComponent.BaseFile]:\n \"\"\"Processes files either sequentially or in parallel, depending on concurrency settings.\n\n Args:\n file_list (list[BaseFileComponent.BaseFile]): List of files to process.\n\n Returns:\n list[BaseFileComponent.BaseFile]: Updated list of files with merged data.\n \"\"\"\n\n def process_file(file_path: str, *, silent_errors: bool = False) -> Data | None:\n \"\"\"Processes a single file and returns its Data object.\"\"\"\n try:\n return parse_text_file_to_data(file_path, silent_errors=silent_errors)\n except FileNotFoundError as e:\n msg = f\"File not found: {file_path}. Error: {e}\"\n self.log(msg)\n if not silent_errors:\n raise\n return None\n except Exception as e:\n msg = f\"Unexpected error processing {file_path}: {e}\"\n self.log(msg)\n if not silent_errors:\n raise\n return None\n\n if not file_list:\n msg = \"No files to process.\"\n raise ValueError(msg)\n\n concurrency = 1 if not self.use_multithreading else max(1, self.concurrency_multithreading)\n file_count = len(file_list)\n\n parallel_processing_threshold = 2\n if concurrency < parallel_processing_threshold or file_count < parallel_processing_threshold:\n if file_count > 1:\n self.log(f\"Processing {file_count} files sequentially.\")\n processed_data = [process_file(str(file.path), silent_errors=self.silent_errors) for file in file_list]\n else:\n self.log(f\"Starting parallel processing of {file_count} files with concurrency: {concurrency}.\")\n file_paths = [str(file.path) for file in file_list]\n processed_data = parallel_load_data(\n file_paths,\n silent_errors=self.silent_errors,\n load_function=process_file,\n max_concurrency=concurrency,\n )\n\n # Use rollup_basefile_data to merge processed data with BaseFile objects\n return self.rollup_data(file_list, processed_data)\n"
},
"concurrency_multithreading": {
"_input_type": "IntInput",
@@ -580,7 +580,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"summary": {
"advanced": false,
@@ -734,7 +734,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"template": {
"_input_type": "PromptInput",
@@ -932,7 +932,7 @@
"show": true,
"title_case": false,
"type": "code",
-            "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def _get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
+            "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def _get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
},
"input_value": {
"_input_type": "MessageInput",
@@ -1325,7 +1325,7 @@
"show": true,
"title_case": false,
"type": "code",
-            "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def _get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
+            "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def _get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
},
"input_value": {
"_input_type": "MessageInput",
@@ -1672,7 +1672,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"template": {
"_input_type": "PromptInput",
@@ -1848,7 +1848,7 @@
"show": true,
"title_case": false,
"type": "code",
-            "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def _get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
+            "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def _get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
},
"input_value": {
"_input_type": "MessageInput",
@@ -2286,7 +2286,7 @@
"show": true,
"title_case": false,
"type": "code",
-            "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+            "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
@@ -2594,7 +2594,7 @@
"show": true,
"title_case": false,
"type": "code",
-            "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+            "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Travel Planning Agents.json b/src/backend/base/langflow/initial_setup/starter_projects/Travel Planning Agents.json
index db75899c6..17558e7af 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Travel Planning Agents.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Travel Planning Agents.json
@@ -304,7 +304,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
+ "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"_input_type": "FileInput",
@@ -621,7 +621,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
@@ -1295,7 +1295,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "import ast\nimport operator\nfrom collections.abc import Callable\n\nfrom langflow.custom import Component\nfrom langflow.inputs import MessageTextInput\nfrom langflow.io import Output\nfrom langflow.schema import Data\n\n\nclass CalculatorComponent(Component):\n display_name = \"Calculator\"\n description = \"Perform basic arithmetic operations on a given expression.\"\n icon = \"calculator\"\n\n # Cache operators dictionary as a class variable\n OPERATORS: dict[type[ast.operator], Callable] = {\n ast.Add: operator.add,\n ast.Sub: operator.sub,\n ast.Mult: operator.mul,\n ast.Div: operator.truediv,\n ast.Pow: operator.pow,\n }\n\n inputs = [\n MessageTextInput(\n name=\"expression\",\n display_name=\"Expression\",\n info=\"The arithmetic expression to evaluate (e.g., '4*4*(33/22)+12-20').\",\n tool_mode=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Data\", name=\"result\", type_=Data, method=\"evaluate_expression\"),\n ]\n\n def _eval_expr(self, node: ast.AST) -> float:\n \"\"\"Evaluate an AST node recursively.\"\"\"\n if isinstance(node, ast.Constant):\n if isinstance(node.value, int | float):\n return float(node.value)\n error_msg = f\"Unsupported constant type: {type(node.value).__name__}\"\n raise TypeError(error_msg)\n if isinstance(node, ast.Num): # For backwards compatibility\n if isinstance(node.n, int | float):\n return float(node.n)\n error_msg = f\"Unsupported number type: {type(node.n).__name__}\"\n raise TypeError(error_msg)\n\n if isinstance(node, ast.BinOp):\n op_type = type(node.op)\n if op_type not in self.OPERATORS:\n error_msg = f\"Unsupported binary operator: {op_type.__name__}\"\n raise TypeError(error_msg)\n\n left = self._eval_expr(node.left)\n right = self._eval_expr(node.right)\n return self.OPERATORS[op_type](left, right)\n\n error_msg = f\"Unsupported operation or expression type: {type(node).__name__}\"\n raise TypeError(error_msg)\n\n def evaluate_expression(self) -> Data:\n \"\"\"Evaluate the mathematical expression and return the result.\"\"\"\n try:\n tree = ast.parse(self.expression, mode=\"eval\")\n result = self._eval_expr(tree.body)\n\n formatted_result = f\"{float(result):.6f}\".rstrip(\"0\").rstrip(\".\")\n self.log(f\"Calculation result: {formatted_result}\")\n\n self.status = formatted_result\n return Data(data={\"result\": formatted_result})\n\n except ZeroDivisionError:\n error_message = \"Error: Division by zero\"\n self.status = error_message\n return Data(data={\"error\": error_message, \"input\": self.expression})\n\n except (SyntaxError, TypeError, KeyError, ValueError, AttributeError, OverflowError) as e:\n error_message = f\"Invalid expression: {e!s}\"\n self.status = error_message\n return Data(data={\"error\": error_message, \"input\": self.expression})\n\n def build(self):\n \"\"\"Return the main evaluation function.\"\"\"\n return self.evaluate_expression\n"
+ "value": "import ast\nimport operator\nfrom collections.abc import Callable\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import MessageTextInput\nfrom langflow.io import Output\nfrom langflow.schema.data import Data\n\n\nclass CalculatorComponent(Component):\n display_name = \"Calculator\"\n description = \"Perform basic arithmetic operations on a given expression.\"\n icon = \"calculator\"\n\n # Cache operators dictionary as a class variable\n OPERATORS: dict[type[ast.operator], Callable] = {\n ast.Add: operator.add,\n ast.Sub: operator.sub,\n ast.Mult: operator.mul,\n ast.Div: operator.truediv,\n ast.Pow: operator.pow,\n }\n\n inputs = [\n MessageTextInput(\n name=\"expression\",\n display_name=\"Expression\",\n info=\"The arithmetic expression to evaluate (e.g., '4*4*(33/22)+12-20').\",\n tool_mode=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Data\", name=\"result\", type_=Data, method=\"evaluate_expression\"),\n ]\n\n def _eval_expr(self, node: ast.AST) -> float:\n \"\"\"Evaluate an AST node recursively.\"\"\"\n if isinstance(node, ast.Constant):\n if isinstance(node.value, int | float):\n return float(node.value)\n error_msg = f\"Unsupported constant type: {type(node.value).__name__}\"\n raise TypeError(error_msg)\n if isinstance(node, ast.Num): # For backwards compatibility\n if isinstance(node.n, int | float):\n return float(node.n)\n error_msg = f\"Unsupported number type: {type(node.n).__name__}\"\n raise TypeError(error_msg)\n\n if isinstance(node, ast.BinOp):\n op_type = type(node.op)\n if op_type not in self.OPERATORS:\n error_msg = f\"Unsupported binary operator: {op_type.__name__}\"\n raise TypeError(error_msg)\n\n left = self._eval_expr(node.left)\n right = self._eval_expr(node.right)\n return self.OPERATORS[op_type](left, right)\n\n error_msg = f\"Unsupported operation or expression type: {type(node).__name__}\"\n raise TypeError(error_msg)\n\n def evaluate_expression(self) -> Data:\n \"\"\"Evaluate the mathematical expression and return the result.\"\"\"\n try:\n tree = ast.parse(self.expression, mode=\"eval\")\n result = self._eval_expr(tree.body)\n\n formatted_result = f\"{float(result):.6f}\".rstrip(\"0\").rstrip(\".\")\n self.log(f\"Calculation result: {formatted_result}\")\n\n self.status = formatted_result\n return Data(data={\"result\": formatted_result})\n\n except ZeroDivisionError:\n error_message = \"Error: Division by zero\"\n self.status = error_message\n return Data(data={\"error\": error_message, \"input\": self.expression})\n\n except (SyntaxError, TypeError, KeyError, ValueError, AttributeError, OverflowError) as e:\n error_message = f\"Invalid expression: {e!s}\"\n self.status = error_message\n return Data(data={\"error\": error_message, \"input\": self.expression})\n\n def build(self):\n \"\"\"Return the main evaluation function.\"\"\"\n return self.evaluate_expression\n"
},
"expression": {
"_input_type": "MessageTextInput",
@@ -1464,7 +1464,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from typing import Any\n\nfrom langchain_community.utilities.searchapi import SearchApiAPIWrapper\n\nfrom langflow.custom import Component\nfrom langflow.inputs import DictInput, DropdownInput, IntInput, MultilineInput, SecretStrInput\nfrom langflow.io import Output\nfrom langflow.schema import Data, DataFrame\n\n\nclass SearchComponent(Component):\n display_name: str = \"Search API\"\n description: str = \"Call the searchapi.io API with result limiting\"\n documentation: str = \"https://www.searchapi.io/docs/google\"\n icon = \"SearchAPI\"\n\n inputs = [\n DropdownInput(name=\"engine\", display_name=\"Engine\", value=\"google\", options=[\"google\", \"bing\", \"duckduckgo\"]),\n SecretStrInput(name=\"api_key\", display_name=\"SearchAPI API Key\", required=True),\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input\",\n tool_mode=True,\n ),\n DictInput(name=\"search_params\", display_name=\"Search parameters\", advanced=True, is_list=True),\n IntInput(name=\"max_results\", display_name=\"Max Results\", value=5, advanced=True),\n IntInput(name=\"max_snippet_length\", display_name=\"Max Snippet Length\", value=100, advanced=True),\n ]\n\n outputs = [\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"fetch_content_dataframe\"),\n ]\n\n def _build_wrapper(self):\n return SearchApiAPIWrapper(engine=self.engine, searchapi_api_key=self.api_key)\n\n def run_model(self) -> DataFrame:\n return self.fetch_content_dataframe()\n\n def fetch_content(self) -> list[Data]:\n wrapper = self._build_wrapper()\n\n def search_func(\n query: str, params: dict[str, Any] | None = None, max_results: int = 5, max_snippet_length: int = 100\n ) -> list[Data]:\n params = params or {}\n full_results = wrapper.results(query=query, **params)\n organic_results = full_results.get(\"organic_results\", [])[:max_results]\n\n return [\n Data(\n text=result.get(\"snippet\", \"\"),\n data={\n \"title\": result.get(\"title\", \"\")[:max_snippet_length],\n \"link\": result.get(\"link\", \"\"),\n \"snippet\": result.get(\"snippet\", \"\")[:max_snippet_length],\n },\n )\n for result in organic_results\n ]\n\n results = search_func(\n self.input_value,\n self.search_params or {},\n self.max_results,\n self.max_snippet_length,\n )\n self.status = results\n return results\n\n def fetch_content_dataframe(self) -> DataFrame:\n \"\"\"Convert the search results to a DataFrame.\n\n Returns:\n DataFrame: A DataFrame containing the search results.\n \"\"\"\n data = self.fetch_content()\n return DataFrame(data)\n"
+ "value": "from typing import Any\n\nfrom langchain_community.utilities.searchapi import SearchApiAPIWrapper\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DictInput, DropdownInput, IntInput, MultilineInput, SecretStrInput\nfrom langflow.io import Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\n\n\nclass SearchComponent(Component):\n display_name: str = \"Search API\"\n description: str = \"Call the searchapi.io API with result limiting\"\n documentation: str = \"https://www.searchapi.io/docs/google\"\n icon = \"SearchAPI\"\n\n inputs = [\n DropdownInput(name=\"engine\", display_name=\"Engine\", value=\"google\", options=[\"google\", \"bing\", \"duckduckgo\"]),\n SecretStrInput(name=\"api_key\", display_name=\"SearchAPI API Key\", required=True),\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input\",\n tool_mode=True,\n ),\n DictInput(name=\"search_params\", display_name=\"Search parameters\", advanced=True, is_list=True),\n IntInput(name=\"max_results\", display_name=\"Max Results\", value=5, advanced=True),\n IntInput(name=\"max_snippet_length\", display_name=\"Max Snippet Length\", value=100, advanced=True),\n ]\n\n outputs = [\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"fetch_content_dataframe\"),\n ]\n\n def _build_wrapper(self):\n return SearchApiAPIWrapper(engine=self.engine, searchapi_api_key=self.api_key)\n\n def run_model(self) -> DataFrame:\n return self.fetch_content_dataframe()\n\n def fetch_content(self) -> list[Data]:\n wrapper = self._build_wrapper()\n\n def search_func(\n query: str, params: dict[str, Any] | None = None, max_results: int = 5, max_snippet_length: int = 100\n ) -> list[Data]:\n params = params or {}\n full_results = wrapper.results(query=query, **params)\n organic_results = full_results.get(\"organic_results\", [])[:max_results]\n\n return [\n Data(\n text=result.get(\"snippet\", \"\"),\n data={\n \"title\": result.get(\"title\", \"\")[:max_snippet_length],\n \"link\": result.get(\"link\", \"\"),\n \"snippet\": result.get(\"snippet\", \"\")[:max_snippet_length],\n },\n )\n for result in organic_results\n ]\n\n results = search_func(\n self.input_value,\n self.search_params or {},\n self.max_results,\n self.max_snippet_length,\n )\n self.status = results\n return results\n\n def fetch_content_dataframe(self) -> DataFrame:\n \"\"\"Convert the search results to a DataFrame.\n\n Returns:\n DataFrame: A DataFrame containing the search results.\n \"\"\"\n data = self.fetch_content()\n return DataFrame(data)\n"
},
"engine": {
"_input_type": "DropdownInput",
@@ -1847,7 +1847,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
+ "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers.current_date import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
},
"handle_parsing_errors": {
"_input_type": "BoolInput",
@@ -2590,7 +2590,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
+ "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers.current_date import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
},
"handle_parsing_errors": {
"_input_type": "BoolInput",
@@ -3333,7 +3333,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
+ "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers.current_date import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
},
"handle_parsing_errors": {
"_input_type": "BoolInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Twitter Thread Generator.json b/src/backend/base/langflow/initial_setup/starter_projects/Twitter Thread Generator.json
index 3cca544cb..653e551dd 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Twitter Thread Generator.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Twitter Thread Generator.json
@@ -368,7 +368,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
+ "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"_input_type": "FileInput",
@@ -790,7 +790,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
@@ -1733,7 +1733,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"template": {
"_input_type": "PromptInput",
@@ -1913,7 +1913,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
+ "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
},
"input_value": {
"_input_type": "MessageInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json b/src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json
index a252a4d55..df2b181f0 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json
@@ -424,7 +424,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
+ "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"advanced": true,
@@ -675,7 +675,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"context": {
"advanced": false,
@@ -892,7 +892,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langchain_text_splitters import CharacterTextSplitter\n\nfrom langflow.custom import Component\nfrom langflow.io import DropdownInput, HandleInput, IntInput, MessageTextInput, Output\nfrom langflow.schema import Data, DataFrame\nfrom langflow.utils.util import unescape_string\n\n\nclass SplitTextComponent(Component):\n display_name: str = \"Split Text\"\n description: str = \"Split text into chunks based on specified criteria.\"\n icon = \"scissors-line-dashed\"\n name = \"SplitText\"\n\n inputs = [\n HandleInput(\n name=\"data_inputs\",\n display_name=\"Data or DataFrame\",\n info=\"The data with texts to split in chunks.\",\n input_types=[\"Data\", \"DataFrame\"],\n required=True,\n ),\n IntInput(\n name=\"chunk_overlap\",\n display_name=\"Chunk Overlap\",\n info=\"Number of characters to overlap between chunks.\",\n value=200,\n ),\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n info=(\n \"The maximum length of each chunk. Text is first split by separator, \"\n \"then chunks are merged up to this size. \"\n \"Individual splits larger than this won't be further divided.\"\n ),\n value=1000,\n ),\n MessageTextInput(\n name=\"separator\",\n display_name=\"Separator\",\n info=(\n \"The character to split on. Use \\\\n for newline. \"\n \"Examples: \\\\n\\\\n for paragraphs, \\\\n for lines, . for sentences\"\n ),\n value=\"\\n\",\n ),\n MessageTextInput(\n name=\"text_key\",\n display_name=\"Text Key\",\n info=\"The key to use for the text column.\",\n value=\"text\",\n advanced=True,\n ),\n DropdownInput(\n name=\"keep_separator\",\n display_name=\"Keep Separator\",\n info=\"Whether to keep the separator in the output chunks and where to place it.\",\n options=[\"False\", \"True\", \"Start\", \"End\"],\n value=\"False\",\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Chunks\", name=\"chunks\", method=\"split_text\"),\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"as_dataframe\"),\n ]\n\n def _docs_to_data(self, docs) -> list[Data]:\n return [Data(text=doc.page_content, data=doc.metadata) for doc in docs]\n\n def _fix_separator(self, separator: str) -> str:\n \"\"\"Fix common separator issues and convert to proper format.\"\"\"\n if separator == \"/n\":\n return \"\\n\"\n if separator == \"/t\":\n return \"\\t\"\n return separator\n\n def split_text_base(self):\n separator = self._fix_separator(self.separator)\n separator = unescape_string(separator)\n\n if isinstance(self.data_inputs, DataFrame):\n if not len(self.data_inputs):\n msg = \"DataFrame is empty\"\n raise TypeError(msg)\n\n self.data_inputs.text_key = self.text_key\n try:\n documents = self.data_inputs.to_lc_documents()\n except Exception as e:\n msg = f\"Error converting DataFrame to documents: {e}\"\n raise TypeError(msg) from e\n else:\n if not self.data_inputs:\n msg = \"No data inputs provided\"\n raise TypeError(msg)\n\n documents = []\n if isinstance(self.data_inputs, Data):\n self.data_inputs.text_key = self.text_key\n documents = [self.data_inputs.to_lc_document()]\n else:\n try:\n documents = [input_.to_lc_document() for input_ in self.data_inputs if isinstance(input_, Data)]\n if not documents:\n msg = f\"No valid Data inputs found in {type(self.data_inputs)}\"\n raise TypeError(msg)\n except AttributeError as e:\n msg = f\"Invalid input type in collection: {e}\"\n raise TypeError(msg) from e\n try:\n # Convert string 'False'/'True' to boolean\n keep_sep = self.keep_separator\n if isinstance(keep_sep, str):\n if keep_sep.lower() == \"false\":\n 
keep_sep = False\n elif keep_sep.lower() == \"true\":\n keep_sep = True\n # 'start' and 'end' are kept as strings\n\n splitter = CharacterTextSplitter(\n chunk_overlap=self.chunk_overlap,\n chunk_size=self.chunk_size,\n separator=separator,\n keep_separator=keep_sep,\n )\n return splitter.split_documents(documents)\n except Exception as e:\n msg = f\"Error splitting text: {e}\"\n raise TypeError(msg) from e\n\n def split_text(self) -> list[Data]:\n return self._docs_to_data(self.split_text_base())\n\n def as_dataframe(self) -> DataFrame:\n return DataFrame(self.split_text())\n"
+ "value": "from langchain_text_splitters import CharacterTextSplitter\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.io import DropdownInput, HandleInput, IntInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.utils.util import unescape_string\n\n\nclass SplitTextComponent(Component):\n display_name: str = \"Split Text\"\n description: str = \"Split text into chunks based on specified criteria.\"\n icon = \"scissors-line-dashed\"\n name = \"SplitText\"\n\n inputs = [\n HandleInput(\n name=\"data_inputs\",\n display_name=\"Data or DataFrame\",\n info=\"The data with texts to split in chunks.\",\n input_types=[\"Data\", \"DataFrame\"],\n required=True,\n ),\n IntInput(\n name=\"chunk_overlap\",\n display_name=\"Chunk Overlap\",\n info=\"Number of characters to overlap between chunks.\",\n value=200,\n ),\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n info=(\n \"The maximum length of each chunk. Text is first split by separator, \"\n \"then chunks are merged up to this size. \"\n \"Individual splits larger than this won't be further divided.\"\n ),\n value=1000,\n ),\n MessageTextInput(\n name=\"separator\",\n display_name=\"Separator\",\n info=(\n \"The character to split on. Use \\\\n for newline. \"\n \"Examples: \\\\n\\\\n for paragraphs, \\\\n for lines, . for sentences\"\n ),\n value=\"\\n\",\n ),\n MessageTextInput(\n name=\"text_key\",\n display_name=\"Text Key\",\n info=\"The key to use for the text column.\",\n value=\"text\",\n advanced=True,\n ),\n DropdownInput(\n name=\"keep_separator\",\n display_name=\"Keep Separator\",\n info=\"Whether to keep the separator in the output chunks and where to place it.\",\n options=[\"False\", \"True\", \"Start\", \"End\"],\n value=\"False\",\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Chunks\", name=\"chunks\", method=\"split_text\"),\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"as_dataframe\"),\n ]\n\n def _docs_to_data(self, docs) -> list[Data]:\n return [Data(text=doc.page_content, data=doc.metadata) for doc in docs]\n\n def _fix_separator(self, separator: str) -> str:\n \"\"\"Fix common separator issues and convert to proper format.\"\"\"\n if separator == \"/n\":\n return \"\\n\"\n if separator == \"/t\":\n return \"\\t\"\n return separator\n\n def split_text_base(self):\n separator = self._fix_separator(self.separator)\n separator = unescape_string(separator)\n\n if isinstance(self.data_inputs, DataFrame):\n if not len(self.data_inputs):\n msg = \"DataFrame is empty\"\n raise TypeError(msg)\n\n self.data_inputs.text_key = self.text_key\n try:\n documents = self.data_inputs.to_lc_documents()\n except Exception as e:\n msg = f\"Error converting DataFrame to documents: {e}\"\n raise TypeError(msg) from e\n else:\n if not self.data_inputs:\n msg = \"No data inputs provided\"\n raise TypeError(msg)\n\n documents = []\n if isinstance(self.data_inputs, Data):\n self.data_inputs.text_key = self.text_key\n documents = [self.data_inputs.to_lc_document()]\n else:\n try:\n documents = [input_.to_lc_document() for input_ in self.data_inputs if isinstance(input_, Data)]\n if not documents:\n msg = f\"No valid Data inputs found in {type(self.data_inputs)}\"\n raise TypeError(msg)\n except AttributeError as e:\n msg = f\"Invalid input type in collection: {e}\"\n raise TypeError(msg) from e\n try:\n # Convert string 'False'/'True' to boolean\n keep_sep = 
self.keep_separator\n if isinstance(keep_sep, str):\n if keep_sep.lower() == \"false\":\n keep_sep = False\n elif keep_sep.lower() == \"true\":\n keep_sep = True\n # 'start' and 'end' are kept as strings\n\n splitter = CharacterTextSplitter(\n chunk_overlap=self.chunk_overlap,\n chunk_size=self.chunk_size,\n separator=separator,\n keep_separator=keep_sep,\n )\n return splitter.split_documents(documents)\n except Exception as e:\n msg = f\"Error splitting text: {e}\"\n raise TypeError(msg) from e\n\n def split_text(self) -> list[Data]:\n return self._docs_to_data(self.split_text_base())\n\n def as_dataframe(self) -> DataFrame:\n return DataFrame(self.split_text())\n"
},
"data_inputs": {
"advanced": false,
@@ -1208,7 +1208,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
@@ -2477,7 +2477,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data import BaseFileComponent\nfrom langflow.base.data.utils import TEXT_FILE_TYPES, parallel_load_data, parse_text_file_to_data\nfrom langflow.io import BoolInput, IntInput\nfrom langflow.schema import Data\n\n\nclass FileComponent(BaseFileComponent):\n \"\"\"Handles loading and processing of individual or zipped text files.\n\n This component supports processing multiple valid files within a zip archive,\n resolving paths, validating file types, and optionally using multithreading for processing.\n \"\"\"\n\n display_name = \"File\"\n description = \"Loads content from one or more files as a DataFrame.\"\n icon = \"file-text\"\n name = \"File\"\n\n VALID_EXTENSIONS = TEXT_FILE_TYPES\n\n inputs = [\n *BaseFileComponent._base_inputs,\n BoolInput(\n name=\"use_multithreading\",\n display_name=\"[Deprecated] Use Multithreading\",\n advanced=True,\n value=True,\n info=\"Set 'Processing Concurrency' greater than 1 to enable multithreading.\",\n ),\n IntInput(\n name=\"concurrency_multithreading\",\n display_name=\"Processing Concurrency\",\n advanced=True,\n info=\"When multiple files are being processed, the number of files to process concurrently.\",\n value=1,\n ),\n ]\n\n outputs = [\n *BaseFileComponent._base_outputs,\n ]\n\n def process_files(self, file_list: list[BaseFileComponent.BaseFile]) -> list[BaseFileComponent.BaseFile]:\n \"\"\"Processes files either sequentially or in parallel, depending on concurrency settings.\n\n Args:\n file_list (list[BaseFileComponent.BaseFile]): List of files to process.\n\n Returns:\n list[BaseFileComponent.BaseFile]: Updated list of files with merged data.\n \"\"\"\n\n def process_file(file_path: str, *, silent_errors: bool = False) -> Data | None:\n \"\"\"Processes a single file and returns its Data object.\"\"\"\n try:\n return parse_text_file_to_data(file_path, silent_errors=silent_errors)\n except FileNotFoundError as e:\n msg = f\"File not found: {file_path}. Error: {e}\"\n self.log(msg)\n if not silent_errors:\n raise\n return None\n except Exception as e:\n msg = f\"Unexpected error processing {file_path}: {e}\"\n self.log(msg)\n if not silent_errors:\n raise\n return None\n\n if not file_list:\n msg = \"No files to process.\"\n raise ValueError(msg)\n\n concurrency = 1 if not self.use_multithreading else max(1, self.concurrency_multithreading)\n file_count = len(file_list)\n\n parallel_processing_threshold = 2\n if concurrency < parallel_processing_threshold or file_count < parallel_processing_threshold:\n if file_count > 1:\n self.log(f\"Processing {file_count} files sequentially.\")\n processed_data = [process_file(str(file.path), silent_errors=self.silent_errors) for file in file_list]\n else:\n self.log(f\"Starting parallel processing of {file_count} files with concurrency: {concurrency}.\")\n file_paths = [str(file.path) for file in file_list]\n processed_data = parallel_load_data(\n file_paths,\n silent_errors=self.silent_errors,\n load_function=process_file,\n max_concurrency=concurrency,\n )\n\n # Use rollup_basefile_data to merge processed data with BaseFile objects\n return self.rollup_data(file_list, processed_data)\n"
+ "value": "from langflow.base.data.base_file import BaseFileComponent\nfrom langflow.base.data.utils import TEXT_FILE_TYPES, parallel_load_data, parse_text_file_to_data\nfrom langflow.io import BoolInput, IntInput\nfrom langflow.schema.data import Data\n\n\nclass FileComponent(BaseFileComponent):\n \"\"\"Handles loading and processing of individual or zipped text files.\n\n This component supports processing multiple valid files within a zip archive,\n resolving paths, validating file types, and optionally using multithreading for processing.\n \"\"\"\n\n display_name = \"File\"\n description = \"Loads content from one or more files as a DataFrame.\"\n icon = \"file-text\"\n name = \"File\"\n\n VALID_EXTENSIONS = TEXT_FILE_TYPES\n\n inputs = [\n *BaseFileComponent._base_inputs,\n BoolInput(\n name=\"use_multithreading\",\n display_name=\"[Deprecated] Use Multithreading\",\n advanced=True,\n value=True,\n info=\"Set 'Processing Concurrency' greater than 1 to enable multithreading.\",\n ),\n IntInput(\n name=\"concurrency_multithreading\",\n display_name=\"Processing Concurrency\",\n advanced=True,\n info=\"When multiple files are being processed, the number of files to process concurrently.\",\n value=1,\n ),\n ]\n\n outputs = [\n *BaseFileComponent._base_outputs,\n ]\n\n def process_files(self, file_list: list[BaseFileComponent.BaseFile]) -> list[BaseFileComponent.BaseFile]:\n \"\"\"Processes files either sequentially or in parallel, depending on concurrency settings.\n\n Args:\n file_list (list[BaseFileComponent.BaseFile]): List of files to process.\n\n Returns:\n list[BaseFileComponent.BaseFile]: Updated list of files with merged data.\n \"\"\"\n\n def process_file(file_path: str, *, silent_errors: bool = False) -> Data | None:\n \"\"\"Processes a single file and returns its Data object.\"\"\"\n try:\n return parse_text_file_to_data(file_path, silent_errors=silent_errors)\n except FileNotFoundError as e:\n msg = f\"File not found: {file_path}. Error: {e}\"\n self.log(msg)\n if not silent_errors:\n raise\n return None\n except Exception as e:\n msg = f\"Unexpected error processing {file_path}: {e}\"\n self.log(msg)\n if not silent_errors:\n raise\n return None\n\n if not file_list:\n msg = \"No files to process.\"\n raise ValueError(msg)\n\n concurrency = 1 if not self.use_multithreading else max(1, self.concurrency_multithreading)\n file_count = len(file_list)\n\n parallel_processing_threshold = 2\n if concurrency < parallel_processing_threshold or file_count < parallel_processing_threshold:\n if file_count > 1:\n self.log(f\"Processing {file_count} files sequentially.\")\n processed_data = [process_file(str(file.path), silent_errors=self.silent_errors) for file in file_list]\n else:\n self.log(f\"Starting parallel processing of {file_count} files with concurrency: {concurrency}.\")\n file_paths = [str(file.path) for file in file_list]\n processed_data = parallel_load_data(\n file_paths,\n silent_errors=self.silent_errors,\n load_function=process_file,\n max_concurrency=concurrency,\n )\n\n # Use rollup_basefile_data to merge processed data with BaseFile objects\n return self.rollup_data(file_list, processed_data)\n"
},
"concurrency_multithreading": {
"_input_type": "IntInput",
@@ -2891,7 +2891,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
+ "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
},
"input_value": {
"_input_type": "MessageInput",
@@ -3536,7 +3536,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "import re\nfrom collections import defaultdict\nfrom dataclasses import asdict, dataclass, field\n\nfrom astrapy import DataAPIClient, Database\nfrom astrapy.data.info.reranking import RerankServiceOptions\nfrom astrapy.info import CollectionDescriptor, CollectionLexicalOptions, CollectionRerankOptions\nfrom langchain_astradb import AstraDBVectorStore, VectorServiceOptions\nfrom langchain_astradb.utils.astradb import HybridSearchMode, _AstraDBCollectionEnvironment\n\nfrom langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom langflow.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom langflow.helpers import docs_to_data\nfrom langflow.inputs import FloatInput, NestedDictInput\nfrom langflow.io import (\n BoolInput,\n DropdownInput,\n HandleInput,\n IntInput,\n QueryInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema import Data\nfrom langflow.utils.version import get_version_info\n\n\n@vector_store_connection\nclass AstraDBVectorStoreComponent(LCVectorStoreComponent):\n display_name: str = \"Astra DB\"\n description: str = \"Ingest and search documents in Astra DB\"\n documentation: str = \"https://docs.datastax.com/en/langflow/astra-components.html\"\n name = \"AstraDB\"\n icon: str = \"AstraDB\"\n\n _cached_vector_store: AstraDBVectorStore | None = None\n\n @dataclass\n class NewDatabaseInput:\n functionality: str = \"create\"\n fields: dict[str, dict] = field(\n default_factory=lambda: {\n \"data\": {\n \"node\": {\n \"name\": \"create_database\",\n \"description\": \"Please allow several minutes for creation to complete.\",\n \"display_name\": \"Create new database\",\n \"field_order\": [\"01_new_database_name\", \"02_cloud_provider\", \"03_region\"],\n \"template\": {\n \"01_new_database_name\": StrInput(\n name=\"new_database_name\",\n display_name=\"Name\",\n info=\"Name of the new database to create in Astra DB.\",\n required=True,\n ),\n \"02_cloud_provider\": DropdownInput(\n name=\"cloud_provider\",\n display_name=\"Cloud provider\",\n info=\"Cloud provider for the new database.\",\n options=[],\n required=True,\n real_time_refresh=True,\n ),\n \"03_region\": DropdownInput(\n name=\"region\",\n display_name=\"Region\",\n info=\"Region for the new database.\",\n options=[],\n required=True,\n ),\n },\n },\n }\n }\n )\n\n @dataclass\n class NewCollectionInput:\n functionality: str = \"create\"\n fields: dict[str, dict] = field(\n default_factory=lambda: {\n \"data\": {\n \"node\": {\n \"name\": \"create_collection\",\n \"description\": \"Please allow several seconds for creation to complete.\",\n \"display_name\": \"Create new collection\",\n \"field_order\": [\n \"01_new_collection_name\",\n \"02_embedding_generation_provider\",\n \"03_embedding_generation_model\",\n \"04_dimension\",\n ],\n \"template\": {\n \"01_new_collection_name\": StrInput(\n name=\"new_collection_name\",\n display_name=\"Name\",\n info=\"Name of the new collection to create in Astra DB.\",\n required=True,\n ),\n \"02_embedding_generation_provider\": DropdownInput(\n name=\"embedding_generation_provider\",\n display_name=\"Embedding generation method\",\n info=\"Provider to use for generating embeddings.\",\n helper_text=(\n \"To create collections with more embedding provider options, go to \"\n 'your database in Astra DB'\n ),\n real_time_refresh=True,\n required=True,\n options=[],\n ),\n \"03_embedding_generation_model\": DropdownInput(\n name=\"embedding_generation_model\",\n display_name=\"Embedding 
model\",\n info=\"Model to use for generating embeddings.\",\n real_time_refresh=True,\n options=[],\n ),\n \"04_dimension\": IntInput(\n name=\"dimension\",\n display_name=\"Dimensions\",\n info=\"Dimensions of the embeddings to generate.\",\n value=None,\n ),\n },\n },\n }\n }\n )\n\n inputs = [\n SecretStrInput(\n name=\"token\",\n display_name=\"Astra DB Application Token\",\n info=\"Authentication token for accessing Astra DB.\",\n value=\"ASTRA_DB_APPLICATION_TOKEN\",\n required=True,\n real_time_refresh=True,\n input_types=[],\n ),\n DropdownInput(\n name=\"environment\",\n display_name=\"Environment\",\n info=\"The environment for the Astra DB API Endpoint.\",\n options=[\"prod\", \"test\", \"dev\"],\n value=\"prod\",\n advanced=True,\n real_time_refresh=True,\n combobox=True,\n ),\n DropdownInput(\n name=\"database_name\",\n display_name=\"Database\",\n info=\"The Database name for the Astra DB instance.\",\n required=True,\n refresh_button=True,\n real_time_refresh=True,\n dialog_inputs=asdict(NewDatabaseInput()),\n combobox=True,\n ),\n StrInput(\n name=\"api_endpoint\",\n display_name=\"Astra DB API Endpoint\",\n info=\"The API Endpoint for the Astra DB instance. Supercedes database selection.\",\n show=False,\n ),\n DropdownInput(\n name=\"keyspace\",\n display_name=\"Keyspace\",\n info=\"Optional keyspace within Astra DB to use for the collection.\",\n advanced=True,\n options=[],\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"collection_name\",\n display_name=\"Collection\",\n info=\"The name of the collection within Astra DB where the vectors will be stored.\",\n required=True,\n refresh_button=True,\n real_time_refresh=True,\n dialog_inputs=asdict(NewCollectionInput()),\n combobox=True,\n show=False,\n ),\n HandleInput(\n name=\"embedding_model\",\n display_name=\"Embedding Model\",\n input_types=[\"Embeddings\"],\n info=\"Specify the Embedding Model. Not required for Astra Vectorize collections.\",\n required=False,\n show=False,\n ),\n *LCVectorStoreComponent.inputs,\n DropdownInput(\n name=\"search_method\",\n display_name=\"Search Method\",\n info=(\n \"Determine how your content is matched: Vector finds semantic similarity, \"\n \"and Hybrid Search (suggested) combines both approaches \"\n \"with a reranker.\"\n ),\n options=[\"Hybrid Search\", \"Vector Search\"], # TODO: Restore Lexical Search?\n options_metadata=[{\"icon\": \"SearchHybrid\"}, {\"icon\": \"SearchVector\"}],\n value=\"Vector Search\",\n advanced=True,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"reranker\",\n display_name=\"Reranker\",\n info=\"Post-retrieval model that re-scores results for optimal relevance ranking.\",\n show=False,\n toggle=True,\n ),\n QueryInput(\n name=\"lexical_terms\",\n display_name=\"Lexical Terms\",\n info=\"Add additional terms/keywords to augment search precision.\",\n placeholder=\"Enter terms to search...\",\n separator=\" \",\n show=False,\n value=\"\",\n advanced=True,\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Number of Search Results\",\n info=\"Number of search results to return.\",\n advanced=True,\n value=4,\n ),\n DropdownInput(\n name=\"search_type\",\n display_name=\"Search Type\",\n info=\"Search type to use\",\n options=[\"Similarity\", \"Similarity with score threshold\", \"MMR (Max Marginal Relevance)\"],\n value=\"Similarity\",\n advanced=True,\n ),\n FloatInput(\n name=\"search_score_threshold\",\n display_name=\"Search Score Threshold\",\n info=\"Minimum similarity score threshold for search results. 
\"\n \"(when using 'Similarity with score threshold')\",\n value=0,\n advanced=True,\n ),\n NestedDictInput(\n name=\"advanced_search_filter\",\n display_name=\"Search Metadata Filter\",\n info=\"Optional dictionary of filters to apply to the search query.\",\n advanced=True,\n ),\n BoolInput(\n name=\"autodetect_collection\",\n display_name=\"Autodetect Collection\",\n info=\"Boolean flag to determine whether to autodetect the collection.\",\n advanced=True,\n value=True,\n ),\n StrInput(\n name=\"content_field\",\n display_name=\"Content Field\",\n info=\"Field to use as the text content field for the vector store.\",\n advanced=True,\n ),\n StrInput(\n name=\"deletion_field\",\n display_name=\"Deletion Based On Field\",\n info=\"When this parameter is provided, documents in the target collection with \"\n \"metadata field values matching the input metadata field value will be deleted \"\n \"before new data is loaded.\",\n advanced=True,\n ),\n BoolInput(\n name=\"ignore_invalid_documents\",\n display_name=\"Ignore Invalid Documents\",\n info=\"Boolean flag to determine whether to ignore invalid documents at runtime.\",\n advanced=True,\n ),\n NestedDictInput(\n name=\"astradb_vectorstore_kwargs\",\n display_name=\"AstraDBVectorStore Parameters\",\n info=\"Optional dictionary of additional parameters for the AstraDBVectorStore.\",\n advanced=True,\n ),\n ]\n\n @classmethod\n def map_cloud_providers(cls):\n # TODO: Programmatically fetch the regions for each cloud provider\n return {\n \"dev\": {\n \"Amazon Web Services\": {\n \"id\": \"aws\",\n \"regions\": [\"us-west-2\"],\n },\n \"Google Cloud Platform\": {\n \"id\": \"gcp\",\n \"regions\": [\"us-central1\", \"europe-west4\"],\n },\n },\n \"test\": {\n \"Google Cloud Platform\": {\n \"id\": \"gcp\",\n \"regions\": [\"us-central1\"],\n },\n },\n \"prod\": {\n \"Amazon Web Services\": {\n \"id\": \"aws\",\n \"regions\": [\"us-east-2\", \"ap-south-1\", \"eu-west-1\"],\n },\n \"Google Cloud Platform\": {\n \"id\": \"gcp\",\n \"regions\": [\"us-east1\"],\n },\n \"Microsoft Azure\": {\n \"id\": \"azure\",\n \"regions\": [\"westus3\"],\n },\n },\n }\n\n @classmethod\n def get_vectorize_providers(cls, token: str, environment: str | None = None, api_endpoint: str | None = None):\n try:\n # Get the admin object\n client = DataAPIClient(environment=environment)\n admin_client = client.get_admin()\n db_admin = admin_client.get_database_admin(api_endpoint, token=token)\n\n # Get the list of embedding providers\n embedding_providers = db_admin.find_embedding_providers()\n\n vectorize_providers_mapping = {}\n # Map the provider display name to the provider key and models\n for provider_key, provider_data in embedding_providers.embedding_providers.items():\n # Get the provider display name and models\n display_name = provider_data.display_name\n models = [model.name for model in provider_data.models]\n\n # Build our mapping\n vectorize_providers_mapping[display_name] = [provider_key, models]\n\n # Sort the resulting dictionary\n return defaultdict(list, dict(sorted(vectorize_providers_mapping.items())))\n except Exception as _: # noqa: BLE001\n return {}\n\n @classmethod\n async def create_database_api(\n cls,\n new_database_name: str,\n cloud_provider: str,\n region: str,\n token: str,\n environment: str | None = None,\n keyspace: str | None = None,\n ):\n client = DataAPIClient(environment=environment)\n\n # Get the admin object\n admin_client = client.get_admin(token=token)\n\n # Get the environment, set to prod if null like\n my_env = environment 
or \"prod\"\n\n # Raise a value error if name isn't provided\n if not new_database_name:\n msg = \"Database name is required to create a new database.\"\n raise ValueError(msg)\n\n # Call the create database function\n return await admin_client.async_create_database(\n name=new_database_name,\n cloud_provider=cls.map_cloud_providers()[my_env][cloud_provider][\"id\"],\n region=region,\n keyspace=keyspace,\n wait_until_active=False,\n )\n\n @classmethod\n async def create_collection_api(\n cls,\n new_collection_name: str,\n token: str,\n api_endpoint: str,\n environment: str | None = None,\n keyspace: str | None = None,\n dimension: int | None = None,\n embedding_generation_provider: str | None = None,\n embedding_generation_model: str | None = None,\n reranker: str | None = None,\n ):\n # Build vectorize options, if needed\n vectorize_options = None\n if not dimension:\n providers = cls.get_vectorize_providers(token=token, environment=environment, api_endpoint=api_endpoint)\n vectorize_options = VectorServiceOptions(\n provider=providers.get(embedding_generation_provider, [None, []])[0],\n model_name=embedding_generation_model,\n )\n\n # Raise a value error if name isn't provided\n if not new_collection_name:\n msg = \"Collection name is required to create a new collection.\"\n raise ValueError(msg)\n\n # Define the base arguments being passed to the create collection function\n base_args = {\n \"collection_name\": new_collection_name,\n \"token\": token,\n \"api_endpoint\": api_endpoint,\n \"keyspace\": keyspace,\n \"environment\": environment,\n \"embedding_dimension\": dimension,\n \"collection_vector_service_options\": vectorize_options,\n }\n\n # Add optional arguments if the reranker is set\n if reranker:\n # Split the reranker field into a provider a model name\n provider, _ = reranker.split(\"/\")\n base_args[\"collection_rerank\"] = CollectionRerankOptions(\n service=RerankServiceOptions(provider=provider, model_name=reranker),\n )\n base_args[\"collection_lexical\"] = CollectionLexicalOptions(analyzer=\"STANDARD\")\n\n _AstraDBCollectionEnvironment(**base_args)\n\n @classmethod\n def get_database_list_static(cls, token: str, environment: str | None = None):\n client = DataAPIClient(environment=environment)\n\n # Get the admin object\n admin_client = client.get_admin(token=token)\n\n # Get the list of databases\n db_list = admin_client.list_databases()\n\n # Generate the api endpoint for each database\n db_info_dict = {}\n for db in db_list:\n try:\n # Get the API endpoint for the database\n api_endpoint = db.regions[0].api_endpoint\n\n # Get the number of collections\n try:\n # Get the number of collections in the database\n num_collections = len(\n client.get_database(\n api_endpoint,\n token=token,\n ).list_collection_names()\n )\n except Exception: # noqa: BLE001\n if db.status != \"PENDING\":\n continue\n num_collections = 0\n\n # Add the database to the dictionary\n db_info_dict[db.name] = {\n \"api_endpoint\": api_endpoint,\n \"keyspaces\": db.keyspaces,\n \"collections\": num_collections,\n \"status\": db.status if db.status != \"ACTIVE\" else None,\n \"org_id\": db.org_id if db.org_id else None,\n }\n except Exception: # noqa: BLE001, S110\n pass\n\n return db_info_dict\n\n def get_database_list(self):\n return self.get_database_list_static(\n token=self.token,\n environment=self.environment,\n )\n\n @classmethod\n def get_api_endpoint_static(\n cls,\n token: str,\n environment: str | None = None,\n api_endpoint: str | None = None,\n database_name: str | None = None,\n 
):\n # If the api_endpoint is set, return it\n if api_endpoint:\n return api_endpoint\n\n # Check if the database_name is like a url\n if database_name and database_name.startswith(\"https://\"):\n return database_name\n\n # If the database is not set, nothing we can do.\n if not database_name:\n return None\n\n # Grab the database object\n db = cls.get_database_list_static(token=token, environment=environment).get(database_name)\n if not db:\n return None\n\n # Otherwise, get the URL from the database list\n return db.get(\"api_endpoint\")\n\n def get_api_endpoint(self):\n return self.get_api_endpoint_static(\n token=self.token,\n environment=self.environment,\n api_endpoint=self.api_endpoint,\n database_name=self.database_name,\n )\n\n @classmethod\n def get_database_id_static(cls, api_endpoint: str) -> str | None:\n # Pattern matches standard UUID format: 8-4-4-4-12 hexadecimal characters\n uuid_pattern = r\"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}\"\n match = re.search(uuid_pattern, api_endpoint)\n\n return match.group(0) if match else None\n\n def get_database_id(self):\n return self.get_database_id_static(api_endpoint=self.get_api_endpoint())\n\n def get_keyspace(self):\n keyspace = self.keyspace\n\n if keyspace:\n return keyspace.strip()\n\n return \"default_keyspace\"\n\n def get_database_object(self, api_endpoint: str | None = None):\n try:\n client = DataAPIClient(environment=self.environment)\n\n return client.get_database(\n api_endpoint or self.get_api_endpoint(),\n token=self.token,\n keyspace=self.get_keyspace(),\n )\n except Exception as e:\n msg = f\"Error fetching database object: {e}\"\n raise ValueError(msg) from e\n\n def collection_data(self, collection_name: str, database: Database | None = None):\n try:\n if not database:\n client = DataAPIClient(environment=self.environment)\n\n database = client.get_database(\n self.get_api_endpoint(),\n token=self.token,\n keyspace=self.get_keyspace(),\n )\n\n collection = database.get_collection(collection_name)\n\n return collection.estimated_document_count()\n except Exception as e: # noqa: BLE001\n self.log(f\"Error checking collection data: {e}\")\n\n return None\n\n def _initialize_database_options(self):\n try:\n return [\n {\n \"name\": name,\n \"status\": info[\"status\"],\n \"collections\": info[\"collections\"],\n \"api_endpoint\": info[\"api_endpoint\"],\n \"keyspaces\": info[\"keyspaces\"],\n \"org_id\": info[\"org_id\"],\n }\n for name, info in self.get_database_list().items()\n ]\n except Exception as e:\n msg = f\"Error fetching database options: {e}\"\n raise ValueError(msg) from e\n\n @classmethod\n def get_provider_icon(cls, collection: CollectionDescriptor | None = None, provider_name: str | None = None) -> str:\n # Get the provider name from the collection\n provider_name = provider_name or (\n collection.definition.vector.service.provider\n if (\n collection\n and collection.definition\n and collection.definition.vector\n and collection.definition.vector.service\n )\n else None\n )\n\n # If there is no provider, use the vector store icon\n if not provider_name or provider_name.lower() == \"bring your own\":\n return \"vectorstores\"\n\n # Map provider casings\n case_map = {\n \"nvidia\": \"NVIDIA\",\n \"openai\": \"OpenAI\",\n \"amazon bedrock\": \"AmazonBedrockEmbeddings\",\n \"azure openai\": \"AzureOpenAiEmbeddings\",\n \"cohere\": \"Cohere\",\n \"jina ai\": \"JinaAI\",\n \"mistral ai\": \"MistralAI\",\n \"upstage\": \"Upstage\",\n \"voyage ai\": \"VoyageAI\",\n }\n\n # 
Adjust the casing on some like nvidia\n return case_map[provider_name.lower()] if provider_name.lower() in case_map else provider_name.title()\n\n def _initialize_collection_options(self, api_endpoint: str | None = None):\n # Nothing to generate if we don't have an API endpoint yet\n api_endpoint = api_endpoint or self.get_api_endpoint()\n if not api_endpoint:\n return []\n\n # Retrieve the database object\n database = self.get_database_object(api_endpoint=api_endpoint)\n\n # Get the list of collections\n collection_list = database.list_collections(keyspace=self.get_keyspace())\n\n # Return the list of collections and metadata associated\n return [\n {\n \"name\": col.name,\n \"records\": self.collection_data(collection_name=col.name, database=database),\n \"provider\": (\n col.definition.vector.service.provider\n if col.definition.vector and col.definition.vector.service\n else None\n ),\n \"icon\": self.get_provider_icon(collection=col),\n \"model\": (\n col.definition.vector.service.model_name\n if col.definition.vector and col.definition.vector.service\n else None\n ),\n }\n for col in collection_list\n ]\n\n def reset_provider_options(self, build_config: dict) -> dict:\n \"\"\"Reset provider options and related configurations in the build_config dictionary.\"\"\"\n # Extract template path for cleaner access\n template = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n\n # Get vectorize providers\n vectorize_providers_api = self.get_vectorize_providers(\n token=self.token,\n environment=self.environment,\n api_endpoint=build_config[\"api_endpoint\"][\"value\"],\n )\n\n # Create a new dictionary with \"Bring your own\" first\n vectorize_providers: dict[str, list[list[str]]] = {\"Bring your own\": [[], []]}\n\n # Add the remaining items (only Nvidia) from the original dictionary\n vectorize_providers.update(\n {\n k: v\n for k, v in vectorize_providers_api.items()\n if k.lower() in [\"nvidia\"] # TODO: Eventually support more\n }\n )\n\n # Set provider options\n provider_field = \"02_embedding_generation_provider\"\n template[provider_field][\"options\"] = list(vectorize_providers.keys())\n\n # Add metadata for each provider option\n template[provider_field][\"options_metadata\"] = [\n {\"icon\": self.get_provider_icon(provider_name=provider)} for provider in template[provider_field][\"options\"]\n ]\n\n # Get selected embedding provider\n embedding_provider = template[provider_field][\"value\"]\n is_bring_your_own = embedding_provider and embedding_provider == \"Bring your own\"\n\n # Configure embedding model field\n model_field = \"03_embedding_generation_model\"\n template[model_field].update(\n {\n \"options\": vectorize_providers.get(embedding_provider, [[], []])[1],\n \"placeholder\": \"Bring your own\" if is_bring_your_own else None,\n \"readonly\": is_bring_your_own,\n \"required\": not is_bring_your_own,\n \"value\": None,\n }\n )\n\n # If this is a bring your own, set dimensions to 0\n return self.reset_dimension_field(build_config)\n\n def reset_dimension_field(self, build_config: dict) -> dict:\n \"\"\"Reset dimension field options based on provided configuration.\"\"\"\n # Extract template path for cleaner access\n template = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n\n # Get selected embedding model\n provider_field = \"02_embedding_generation_provider\"\n embedding_provider = template[provider_field][\"value\"]\n is_bring_your_own = embedding_provider and 
embedding_provider == \"Bring your own\"\n\n # Configure dimension field\n dimension_field = \"04_dimension\"\n dimension_value = 1024 if not is_bring_your_own else None # TODO: Dynamically figure this out\n template[dimension_field].update(\n {\n \"placeholder\": dimension_value,\n \"value\": dimension_value,\n \"readonly\": not is_bring_your_own,\n \"required\": is_bring_your_own,\n }\n )\n\n return build_config\n\n def reset_collection_list(self, build_config: dict) -> dict:\n \"\"\"Reset collection list options based on provided configuration.\"\"\"\n # Get collection options\n collection_options = self._initialize_collection_options(api_endpoint=build_config[\"api_endpoint\"][\"value\"])\n # Update collection configuration\n collection_config = build_config[\"collection_name\"]\n collection_config.update(\n {\n \"options\": [col[\"name\"] for col in collection_options],\n \"options_metadata\": [{k: v for k, v in col.items() if k != \"name\"} for col in collection_options],\n }\n )\n\n # Reset selected collection if not in options\n if collection_config[\"value\"] not in collection_config[\"options\"]:\n collection_config[\"value\"] = \"\"\n\n # Set advanced status based on database selection\n collection_config[\"show\"] = bool(build_config[\"database_name\"][\"value\"])\n\n return build_config\n\n def reset_database_list(self, build_config: dict) -> dict:\n \"\"\"Reset database list options and related configurations.\"\"\"\n # Get database options\n database_options = self._initialize_database_options()\n\n # Update cloud provider options\n env = self.environment\n template = build_config[\"database_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n template[\"02_cloud_provider\"][\"options\"] = list(self.map_cloud_providers()[env].keys())\n\n # Update database configuration\n database_config = build_config[\"database_name\"]\n database_config.update(\n {\n \"options\": [db[\"name\"] for db in database_options],\n \"options_metadata\": [{k: v for k, v in db.items() if k != \"name\"} for db in database_options],\n }\n )\n\n # Reset selections if value not in options\n if database_config[\"value\"] not in database_config[\"options\"]:\n database_config[\"value\"] = \"\"\n build_config[\"api_endpoint\"][\"value\"] = \"\"\n build_config[\"collection_name\"][\"show\"] = False\n\n # Set advanced status based on token presence\n database_config[\"show\"] = bool(build_config[\"token\"][\"value\"])\n\n return build_config\n\n def reset_build_config(self, build_config: dict) -> dict:\n \"\"\"Reset all build configuration options to default empty state.\"\"\"\n # Reset database configuration\n database_config = build_config[\"database_name\"]\n database_config.update({\"options\": [], \"options_metadata\": [], \"value\": \"\", \"show\": False})\n build_config[\"api_endpoint\"][\"value\"] = \"\"\n\n # Reset collection configuration\n collection_config = build_config[\"collection_name\"]\n collection_config.update({\"options\": [], \"options_metadata\": [], \"value\": \"\", \"show\": False})\n\n return build_config\n\n def _handle_hybrid_search_options(self, build_config: dict) -> dict:\n \"\"\"Set hybrid search options in the build configuration.\"\"\"\n # Detect what hybrid options are available\n # Get the admin object\n client = DataAPIClient(environment=self.environment)\n admin_client = client.get_admin()\n db_admin = admin_client.get_database_admin(self.get_api_endpoint(), token=self.token)\n\n # We will try to get the reranking providers to see if its hybrid 
emabled\n try:\n providers = db_admin.find_reranking_providers()\n build_config[\"reranker\"][\"options\"] = [\n model.name for provider_data in providers.reranking_providers.values() for model in provider_data.models\n ]\n build_config[\"reranker\"][\"options_metadata\"] = [\n {\"icon\": self.get_provider_icon(provider_name=model.name.split(\"/\")[0])}\n for provider in providers.reranking_providers.values()\n for model in provider.models\n ]\n build_config[\"reranker\"][\"value\"] = build_config[\"reranker\"][\"options\"][0]\n\n # Set the default search field to hybrid search\n build_config[\"search_method\"][\"show\"] = True\n build_config[\"search_method\"][\"options\"] = [\"Hybrid Search\", \"Vector Search\"]\n build_config[\"search_method\"][\"value\"] = \"Hybrid Search\"\n except Exception as _: # noqa: BLE001\n build_config[\"reranker\"][\"options\"] = []\n build_config[\"reranker\"][\"options_metadata\"] = []\n\n # Set the default search field to vector search\n build_config[\"search_method\"][\"show\"] = False\n build_config[\"search_method\"][\"options\"] = [\"Vector Search\"]\n build_config[\"search_method\"][\"value\"] = \"Vector Search\"\n\n # Set reranker and lexical terms options based on search method\n build_config[\"reranker\"][\"toggle_value\"] = True\n build_config[\"reranker\"][\"show\"] = build_config[\"search_method\"][\"value\"] == \"Hybrid Search\"\n build_config[\"reranker\"][\"toggle_disable\"] = build_config[\"search_method\"][\"value\"] == \"Hybrid Search\"\n if build_config[\"reranker\"][\"show\"]:\n build_config[\"search_type\"][\"value\"] = \"Similarity\"\n\n return build_config\n\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Update build configuration based on field name and value.\"\"\"\n # Early return if no token provided\n if not self.token:\n return self.reset_build_config(build_config)\n\n # Database creation callback\n if field_name == \"database_name\" and isinstance(field_value, dict):\n if \"01_new_database_name\" in field_value:\n await self._create_new_database(build_config, field_value)\n return self.reset_collection_list(build_config)\n return self._update_cloud_regions(build_config, field_value)\n\n # Collection creation callback\n if field_name == \"collection_name\" and isinstance(field_value, dict):\n # Case 1: New collection creation\n if \"01_new_collection_name\" in field_value:\n await self._create_new_collection(build_config, field_value)\n return build_config\n\n # Case 2: Update embedding provider options\n if \"02_embedding_generation_provider\" in field_value:\n return self.reset_provider_options(build_config)\n\n # Case 3: Update dimension field\n if \"03_embedding_generation_model\" in field_value:\n return self.reset_dimension_field(build_config)\n\n # Initial execution or token/environment change\n first_run = field_name == \"collection_name\" and not field_value and not build_config[\"database_name\"][\"options\"]\n if first_run or field_name in {\"token\", \"environment\"}:\n return self.reset_database_list(build_config)\n\n # Database selection change\n if field_name == \"database_name\" and not isinstance(field_value, dict):\n return self._handle_database_selection(build_config, field_value)\n\n # Keyspace selection change\n if field_name == \"keyspace\":\n return self.reset_collection_list(build_config)\n\n # Collection selection change\n if field_name == \"collection_name\" and not isinstance(field_value, dict):\n return 
self._handle_collection_selection(build_config, field_value)\n\n # Search method selection change\n if field_name == \"search_method\":\n is_vector_search = field_value == \"Vector Search\"\n is_autodetect = build_config[\"autodetect_collection\"][\"value\"]\n\n # Configure lexical terms (same for both cases)\n build_config[\"lexical_terms\"][\"show\"] = not is_vector_search\n build_config[\"lexical_terms\"][\"value\"] = \"\" if is_vector_search else build_config[\"lexical_terms\"][\"value\"]\n\n # Disable reranker disabling if hybrid search is selected\n build_config[\"reranker\"][\"toggle_disable\"] = not is_vector_search\n build_config[\"reranker\"][\"toggle_value\"] = True\n build_config[\"reranker\"][\"value\"] = build_config[\"reranker\"][\"options\"][0]\n\n # Toggle search type and score threshold based on search method\n build_config[\"search_type\"][\"show\"] = is_vector_search\n build_config[\"search_score_threshold\"][\"show\"] = is_vector_search\n\n # Make sure the search_type is set to \"Similarity\"\n if not is_vector_search or is_autodetect:\n build_config[\"search_type\"][\"value\"] = \"Similarity\"\n\n return build_config\n\n async def _create_new_database(self, build_config: dict, field_value: dict) -> None:\n \"\"\"Create a new database and update build config options.\"\"\"\n try:\n await self.create_database_api(\n new_database_name=field_value[\"01_new_database_name\"],\n token=self.token,\n keyspace=self.get_keyspace(),\n environment=self.environment,\n cloud_provider=field_value[\"02_cloud_provider\"],\n region=field_value[\"03_region\"],\n )\n except Exception as e:\n msg = f\"Error creating database: {e}\"\n raise ValueError(msg) from e\n\n build_config[\"database_name\"][\"options\"].append(field_value[\"01_new_database_name\"])\n build_config[\"database_name\"][\"options_metadata\"].append(\n {\n \"status\": \"PENDING\",\n \"collections\": 0,\n \"api_endpoint\": None,\n \"keyspaces\": [self.get_keyspace()],\n \"org_id\": None,\n }\n )\n\n def _update_cloud_regions(self, build_config: dict, field_value: dict) -> dict:\n \"\"\"Update cloud provider regions in build config.\"\"\"\n env = self.environment\n cloud_provider = field_value[\"02_cloud_provider\"]\n\n # Update the region options based on the selected cloud provider\n template = build_config[\"database_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n template[\"03_region\"][\"options\"] = self.map_cloud_providers()[env][cloud_provider][\"regions\"]\n\n # Reset the the 03_region value if it's not in the new options\n if template[\"03_region\"][\"value\"] not in template[\"03_region\"][\"options\"]:\n template[\"03_region\"][\"value\"] = None\n\n return build_config\n\n async def _create_new_collection(self, build_config: dict, field_value: dict) -> None:\n \"\"\"Create a new collection and update build config options.\"\"\"\n embedding_provider = field_value.get(\"02_embedding_generation_provider\")\n try:\n await self.create_collection_api(\n new_collection_name=field_value[\"01_new_collection_name\"],\n token=self.token,\n api_endpoint=build_config[\"api_endpoint\"][\"value\"],\n environment=self.environment,\n keyspace=self.get_keyspace(),\n dimension=field_value.get(\"04_dimension\") if embedding_provider == \"Bring your own\" else None,\n embedding_generation_provider=embedding_provider,\n embedding_generation_model=field_value.get(\"03_embedding_generation_model\"),\n reranker=self.reranker,\n )\n except Exception as e:\n msg = f\"Error creating collection: {e}\"\n raise 
ValueError(msg) from e\n\n provider = embedding_provider.lower() if embedding_provider and embedding_provider != \"Bring your own\" else None\n build_config[\"collection_name\"].update(\n {\n \"value\": field_value[\"01_new_collection_name\"],\n \"options\": build_config[\"collection_name\"][\"options\"] + [field_value[\"01_new_collection_name\"]],\n }\n )\n build_config[\"embedding_model\"][\"show\"] = not bool(provider)\n build_config[\"embedding_model\"][\"required\"] = not bool(provider)\n build_config[\"collection_name\"][\"options_metadata\"].append(\n {\n \"records\": 0,\n \"provider\": provider,\n \"icon\": self.get_provider_icon(provider_name=provider),\n \"model\": field_value.get(\"03_embedding_generation_model\"),\n }\n )\n\n # Make sure we always show the reranker options if the collection is hybrid enabled\n # And right now they always are\n build_config[\"lexical_terms\"][\"show\"] = True\n\n def _handle_database_selection(self, build_config: dict, field_value: str) -> dict:\n \"\"\"Handle database selection and update related configurations.\"\"\"\n build_config = self.reset_database_list(build_config)\n\n # Reset collection list if database selection changes\n if field_value not in build_config[\"database_name\"][\"options\"]:\n build_config[\"database_name\"][\"value\"] = \"\"\n return build_config\n\n # Get the api endpoint for the selected database\n index = build_config[\"database_name\"][\"options\"].index(field_value)\n build_config[\"api_endpoint\"][\"value\"] = build_config[\"database_name\"][\"options_metadata\"][index][\"api_endpoint\"]\n\n # Get the org_id for the selected database\n org_id = build_config[\"database_name\"][\"options_metadata\"][index][\"org_id\"]\n if not org_id:\n return build_config\n\n # Update the list of keyspaces based on the db info\n build_config[\"keyspace\"][\"options\"] = build_config[\"database_name\"][\"options_metadata\"][index][\"keyspaces\"]\n build_config[\"keyspace\"][\"value\"] = (\n build_config[\"keyspace\"][\"options\"] and build_config[\"keyspace\"][\"options\"][0]\n if build_config[\"keyspace\"][\"value\"] not in build_config[\"keyspace\"][\"options\"]\n else build_config[\"keyspace\"][\"value\"]\n )\n\n # Get the database id for the selected database\n db_id = self.get_database_id_static(api_endpoint=build_config[\"api_endpoint\"][\"value\"])\n keyspace = self.get_keyspace()\n\n # Update the helper text for the embedding provider field\n template = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n template[\"02_embedding_generation_provider\"][\"helper_text\"] = (\n \"To create collections with more embedding provider options, go to \"\n f''\n \"your database in Astra DB.\"\n )\n\n # Reset provider options\n build_config = self.reset_provider_options(build_config)\n\n # Handle hybrid search options\n build_config = self._handle_hybrid_search_options(build_config)\n\n return self.reset_collection_list(build_config)\n\n def _handle_collection_selection(self, build_config: dict, field_value: str) -> dict:\n \"\"\"Handle collection selection and update embedding options.\"\"\"\n build_config[\"autodetect_collection\"][\"value\"] = True\n build_config = self.reset_collection_list(build_config)\n\n # Reset embedding model if collection selection changes\n if field_value and field_value not in build_config[\"collection_name\"][\"options\"]:\n build_config[\"collection_name\"][\"options\"].append(field_value)\n build_config[\"collection_name\"][\"options_metadata\"].append(\n 
{\n \"records\": 0,\n \"provider\": None,\n \"icon\": \"vectorstores\",\n \"model\": None,\n }\n )\n build_config[\"autodetect_collection\"][\"value\"] = False\n\n if not field_value:\n return build_config\n\n # Get the selected collection index\n index = build_config[\"collection_name\"][\"options\"].index(field_value)\n\n # Set the provider of the selected collection\n provider = build_config[\"collection_name\"][\"options_metadata\"][index][\"provider\"]\n build_config[\"embedding_model\"][\"show\"] = not bool(provider)\n build_config[\"embedding_model\"][\"required\"] = not bool(provider)\n\n # Grab the collection object\n database = self.get_database_object(api_endpoint=build_config[\"api_endpoint\"][\"value\"])\n collection = database.get_collection(\n name=field_value,\n keyspace=build_config[\"keyspace\"][\"value\"],\n )\n\n # Check if hybrid and lexical are enabled\n col_options = collection.options()\n hyb_enabled = col_options.rerank and col_options.rerank.enabled\n lex_enabled = col_options.lexical and col_options.lexical.enabled\n user_hyb_enabled = build_config[\"search_method\"][\"value\"] == \"Hybrid Search\"\n\n # Show lexical terms if the collection is hybrid enabled\n build_config[\"lexical_terms\"][\"show\"] = hyb_enabled and lex_enabled and user_hyb_enabled\n\n return build_config\n\n @check_cached_vector_store\n def build_vector_store(self):\n try:\n from langchain_astradb import AstraDBVectorStore\n except ImportError as e:\n msg = (\n \"Could not import langchain Astra DB integration package. \"\n \"Please install it with `pip install langchain-astradb`.\"\n )\n raise ImportError(msg) from e\n\n # Get the embedding model and additional params\n embedding_params = {\"embedding\": self.embedding_model} if self.embedding_model else {}\n\n # Get the additional parameters\n additional_params = self.astradb_vectorstore_kwargs or {}\n\n # Get Langflow version and platform information\n __version__ = get_version_info()[\"version\"]\n langflow_prefix = \"\"\n # if os.getenv(\"AWS_EXECUTION_ENV\") == \"AWS_ECS_FARGATE\": # TODO: More precise way of detecting\n # langflow_prefix = \"ds-\"\n\n # Get the database object\n database = self.get_database_object()\n autodetect = self.collection_name in database.list_collection_names() and self.autodetect_collection\n\n # Bundle up the auto-detect parameters\n autodetect_params = {\n \"autodetect_collection\": autodetect,\n \"content_field\": (\n self.content_field\n if self.content_field and embedding_params\n else (\n \"page_content\"\n if embedding_params\n and self.collection_data(collection_name=self.collection_name, database=database) == 0\n else None\n )\n ),\n \"ignore_invalid_documents\": self.ignore_invalid_documents,\n }\n\n # Choose HybridSearchMode based on the selected param\n hybrid_search_mode = HybridSearchMode.DEFAULT if self.search_method == \"Hybrid Search\" else HybridSearchMode.OFF\n\n # Attempt to build the Vector Store object\n try:\n vector_store = AstraDBVectorStore(\n # Astra DB Authentication Parameters\n token=self.token,\n api_endpoint=database.api_endpoint,\n namespace=database.keyspace,\n collection_name=self.collection_name,\n environment=self.environment,\n # Hybrid Search Parameters\n hybrid_search=hybrid_search_mode,\n # Astra DB Usage Tracking Parameters\n ext_callers=[(f\"{langflow_prefix}langflow\", __version__)],\n # Astra DB Vector Store Parameters\n **autodetect_params,\n **embedding_params,\n **additional_params,\n )\n except Exception as e:\n msg = f\"Error initializing AstraDBVectorStore: 
{e}\"\n raise ValueError(msg) from e\n\n # Add documents to the vector store\n self._add_documents_to_vector_store(vector_store)\n\n return vector_store\n\n def _add_documents_to_vector_store(self, vector_store) -> None:\n self.ingest_data = self._prepare_ingest_data()\n\n documents = []\n for _input in self.ingest_data or []:\n if isinstance(_input, Data):\n documents.append(_input.to_lc_document())\n else:\n msg = \"Vector Store Inputs must be Data objects.\"\n raise TypeError(msg)\n\n if documents and self.deletion_field:\n self.log(f\"Deleting documents where {self.deletion_field}\")\n try:\n database = self.get_database_object()\n collection = database.get_collection(self.collection_name, keyspace=database.keyspace)\n delete_values = list({doc.metadata[self.deletion_field] for doc in documents})\n self.log(f\"Deleting documents where {self.deletion_field} matches {delete_values}.\")\n collection.delete_many({f\"metadata.{self.deletion_field}\": {\"$in\": delete_values}})\n except Exception as e:\n msg = f\"Error deleting documents from AstraDBVectorStore based on '{self.deletion_field}': {e}\"\n raise ValueError(msg) from e\n\n if documents:\n self.log(f\"Adding {len(documents)} documents to the Vector Store.\")\n try:\n vector_store.add_documents(documents)\n except Exception as e:\n msg = f\"Error adding documents to AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n else:\n self.log(\"No documents to add to the Vector Store.\")\n\n def _map_search_type(self) -> str:\n search_type_mapping = {\n \"Similarity with score threshold\": \"similarity_score_threshold\",\n \"MMR (Max Marginal Relevance)\": \"mmr\",\n }\n\n return search_type_mapping.get(self.search_type, \"similarity\")\n\n def _build_search_args(self):\n # Clean up the search query\n query = self.search_query if isinstance(self.search_query, str) and self.search_query.strip() else None\n lexical_terms = self.lexical_terms or None\n\n # Check if we have a search query, and if so set the args\n if query:\n args = {\n \"query\": query,\n \"search_type\": self._map_search_type(),\n \"k\": self.number_of_results,\n \"score_threshold\": self.search_score_threshold,\n \"lexical_query\": lexical_terms,\n }\n elif self.advanced_search_filter:\n args = {\n \"n\": self.number_of_results,\n }\n else:\n return {}\n\n filter_arg = self.advanced_search_filter or {}\n if filter_arg:\n args[\"filter\"] = filter_arg\n\n return args\n\n def search_documents(self, vector_store=None) -> list[Data]:\n vector_store = vector_store or self.build_vector_store()\n\n self.log(f\"Search input: {self.search_query}\")\n self.log(f\"Search type: {self.search_type}\")\n self.log(f\"Number of results: {self.number_of_results}\")\n self.log(f\"store.hybrid_search: {vector_store.hybrid_search}\")\n self.log(f\"Lexical terms: {self.lexical_terms}\")\n self.log(f\"Reranker: {self.reranker}\")\n\n try:\n search_args = self._build_search_args()\n except Exception as e:\n msg = f\"Error in AstraDBVectorStore._build_search_args: {e}\"\n raise ValueError(msg) from e\n\n if not search_args:\n self.log(\"No search input or filters provided. 
Skipping search.\")\n return []\n\n docs = []\n search_method = \"search\" if \"query\" in search_args else \"metadata_search\"\n\n try:\n self.log(f\"Calling vector_store.{search_method} with args: {search_args}\")\n docs = getattr(vector_store, search_method)(**search_args)\n except Exception as e:\n msg = f\"Error performing {search_method} in AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n\n self.log(f\"Retrieved documents: {len(docs)}\")\n\n data = docs_to_data(docs)\n self.log(f\"Converted documents to data: {len(data)}\")\n self.status = data\n\n return data\n\n def get_retriever_kwargs(self):\n search_args = self._build_search_args()\n\n return {\n \"search_type\": self._map_search_type(),\n \"search_kwargs\": search_args,\n }\n"
+ "value": "import re\nfrom collections import defaultdict\nfrom dataclasses import asdict, dataclass, field\n\nfrom astrapy import DataAPIClient, Database\nfrom astrapy.data.info.reranking import RerankServiceOptions\nfrom astrapy.info import CollectionDescriptor, CollectionLexicalOptions, CollectionRerankOptions\nfrom langchain_astradb import AstraDBVectorStore, VectorServiceOptions\nfrom langchain_astradb.utils.astradb import HybridSearchMode, _AstraDBCollectionEnvironment\n\nfrom langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom langflow.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom langflow.helpers.data import docs_to_data\nfrom langflow.inputs.inputs import FloatInput, NestedDictInput\nfrom langflow.io import (\n BoolInput,\n DropdownInput,\n HandleInput,\n IntInput,\n QueryInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema.data import Data\nfrom langflow.utils.version import get_version_info\n\n\n@vector_store_connection\nclass AstraDBVectorStoreComponent(LCVectorStoreComponent):\n display_name: str = \"Astra DB\"\n description: str = \"Ingest and search documents in Astra DB\"\n documentation: str = \"https://docs.datastax.com/en/langflow/astra-components.html\"\n name = \"AstraDB\"\n icon: str = \"AstraDB\"\n\n _cached_vector_store: AstraDBVectorStore | None = None\n\n @dataclass\n class NewDatabaseInput:\n functionality: str = \"create\"\n fields: dict[str, dict] = field(\n default_factory=lambda: {\n \"data\": {\n \"node\": {\n \"name\": \"create_database\",\n \"description\": \"Please allow several minutes for creation to complete.\",\n \"display_name\": \"Create new database\",\n \"field_order\": [\"01_new_database_name\", \"02_cloud_provider\", \"03_region\"],\n \"template\": {\n \"01_new_database_name\": StrInput(\n name=\"new_database_name\",\n display_name=\"Name\",\n info=\"Name of the new database to create in Astra DB.\",\n required=True,\n ),\n \"02_cloud_provider\": DropdownInput(\n name=\"cloud_provider\",\n display_name=\"Cloud provider\",\n info=\"Cloud provider for the new database.\",\n options=[],\n required=True,\n real_time_refresh=True,\n ),\n \"03_region\": DropdownInput(\n name=\"region\",\n display_name=\"Region\",\n info=\"Region for the new database.\",\n options=[],\n required=True,\n ),\n },\n },\n }\n }\n )\n\n @dataclass\n class NewCollectionInput:\n functionality: str = \"create\"\n fields: dict[str, dict] = field(\n default_factory=lambda: {\n \"data\": {\n \"node\": {\n \"name\": \"create_collection\",\n \"description\": \"Please allow several seconds for creation to complete.\",\n \"display_name\": \"Create new collection\",\n \"field_order\": [\n \"01_new_collection_name\",\n \"02_embedding_generation_provider\",\n \"03_embedding_generation_model\",\n \"04_dimension\",\n ],\n \"template\": {\n \"01_new_collection_name\": StrInput(\n name=\"new_collection_name\",\n display_name=\"Name\",\n info=\"Name of the new collection to create in Astra DB.\",\n required=True,\n ),\n \"02_embedding_generation_provider\": DropdownInput(\n name=\"embedding_generation_provider\",\n display_name=\"Embedding generation method\",\n info=\"Provider to use for generating embeddings.\",\n helper_text=(\n \"To create collections with more embedding provider options, go to \"\n 'your database in Astra DB'\n ),\n real_time_refresh=True,\n required=True,\n options=[],\n ),\n \"03_embedding_generation_model\": DropdownInput(\n name=\"embedding_generation_model\",\n 
display_name=\"Embedding model\",\n info=\"Model to use for generating embeddings.\",\n real_time_refresh=True,\n options=[],\n ),\n \"04_dimension\": IntInput(\n name=\"dimension\",\n display_name=\"Dimensions\",\n info=\"Dimensions of the embeddings to generate.\",\n value=None,\n ),\n },\n },\n }\n }\n )\n\n inputs = [\n SecretStrInput(\n name=\"token\",\n display_name=\"Astra DB Application Token\",\n info=\"Authentication token for accessing Astra DB.\",\n value=\"ASTRA_DB_APPLICATION_TOKEN\",\n required=True,\n real_time_refresh=True,\n input_types=[],\n ),\n DropdownInput(\n name=\"environment\",\n display_name=\"Environment\",\n info=\"The environment for the Astra DB API Endpoint.\",\n options=[\"prod\", \"test\", \"dev\"],\n value=\"prod\",\n advanced=True,\n real_time_refresh=True,\n combobox=True,\n ),\n DropdownInput(\n name=\"database_name\",\n display_name=\"Database\",\n info=\"The Database name for the Astra DB instance.\",\n required=True,\n refresh_button=True,\n real_time_refresh=True,\n dialog_inputs=asdict(NewDatabaseInput()),\n combobox=True,\n ),\n StrInput(\n name=\"api_endpoint\",\n display_name=\"Astra DB API Endpoint\",\n info=\"The API Endpoint for the Astra DB instance. Supercedes database selection.\",\n show=False,\n ),\n DropdownInput(\n name=\"keyspace\",\n display_name=\"Keyspace\",\n info=\"Optional keyspace within Astra DB to use for the collection.\",\n advanced=True,\n options=[],\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"collection_name\",\n display_name=\"Collection\",\n info=\"The name of the collection within Astra DB where the vectors will be stored.\",\n required=True,\n refresh_button=True,\n real_time_refresh=True,\n dialog_inputs=asdict(NewCollectionInput()),\n combobox=True,\n show=False,\n ),\n HandleInput(\n name=\"embedding_model\",\n display_name=\"Embedding Model\",\n input_types=[\"Embeddings\"],\n info=\"Specify the Embedding Model. 
Not required for Astra Vectorize collections.\",\n required=False,\n show=False,\n ),\n *LCVectorStoreComponent.inputs,\n DropdownInput(\n name=\"search_method\",\n display_name=\"Search Method\",\n info=(\n \"Determine how your content is matched: Vector finds semantic similarity, \"\n \"and Hybrid Search (suggested) combines both approaches \"\n \"with a reranker.\"\n ),\n options=[\"Hybrid Search\", \"Vector Search\"], # TODO: Restore Lexical Search?\n options_metadata=[{\"icon\": \"SearchHybrid\"}, {\"icon\": \"SearchVector\"}],\n value=\"Vector Search\",\n advanced=True,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"reranker\",\n display_name=\"Reranker\",\n info=\"Post-retrieval model that re-scores results for optimal relevance ranking.\",\n show=False,\n toggle=True,\n ),\n QueryInput(\n name=\"lexical_terms\",\n display_name=\"Lexical Terms\",\n info=\"Add additional terms/keywords to augment search precision.\",\n placeholder=\"Enter terms to search...\",\n separator=\" \",\n show=False,\n value=\"\",\n advanced=True,\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Number of Search Results\",\n info=\"Number of search results to return.\",\n advanced=True,\n value=4,\n ),\n DropdownInput(\n name=\"search_type\",\n display_name=\"Search Type\",\n info=\"Search type to use\",\n options=[\"Similarity\", \"Similarity with score threshold\", \"MMR (Max Marginal Relevance)\"],\n value=\"Similarity\",\n advanced=True,\n ),\n FloatInput(\n name=\"search_score_threshold\",\n display_name=\"Search Score Threshold\",\n info=\"Minimum similarity score threshold for search results. \"\n \"(when using 'Similarity with score threshold')\",\n value=0,\n advanced=True,\n ),\n NestedDictInput(\n name=\"advanced_search_filter\",\n display_name=\"Search Metadata Filter\",\n info=\"Optional dictionary of filters to apply to the search query.\",\n advanced=True,\n ),\n BoolInput(\n name=\"autodetect_collection\",\n display_name=\"Autodetect Collection\",\n info=\"Boolean flag to determine whether to autodetect the collection.\",\n advanced=True,\n value=True,\n ),\n StrInput(\n name=\"content_field\",\n display_name=\"Content Field\",\n info=\"Field to use as the text content field for the vector store.\",\n advanced=True,\n ),\n StrInput(\n name=\"deletion_field\",\n display_name=\"Deletion Based On Field\",\n info=\"When this parameter is provided, documents in the target collection with \"\n \"metadata field values matching the input metadata field value will be deleted \"\n \"before new data is loaded.\",\n advanced=True,\n ),\n BoolInput(\n name=\"ignore_invalid_documents\",\n display_name=\"Ignore Invalid Documents\",\n info=\"Boolean flag to determine whether to ignore invalid documents at runtime.\",\n advanced=True,\n ),\n NestedDictInput(\n name=\"astradb_vectorstore_kwargs\",\n display_name=\"AstraDBVectorStore Parameters\",\n info=\"Optional dictionary of additional parameters for the AstraDBVectorStore.\",\n advanced=True,\n ),\n ]\n\n @classmethod\n def map_cloud_providers(cls):\n # TODO: Programmatically fetch the regions for each cloud provider\n return {\n \"dev\": {\n \"Amazon Web Services\": {\n \"id\": \"aws\",\n \"regions\": [\"us-west-2\"],\n },\n \"Google Cloud Platform\": {\n \"id\": \"gcp\",\n \"regions\": [\"us-central1\", \"europe-west4\"],\n },\n },\n \"test\": {\n \"Google Cloud Platform\": {\n \"id\": \"gcp\",\n \"regions\": [\"us-central1\"],\n },\n },\n \"prod\": {\n \"Amazon Web Services\": {\n \"id\": \"aws\",\n \"regions\": [\"us-east-2\", 
\"ap-south-1\", \"eu-west-1\"],\n },\n \"Google Cloud Platform\": {\n \"id\": \"gcp\",\n \"regions\": [\"us-east1\"],\n },\n \"Microsoft Azure\": {\n \"id\": \"azure\",\n \"regions\": [\"westus3\"],\n },\n },\n }\n\n @classmethod\n def get_vectorize_providers(cls, token: str, environment: str | None = None, api_endpoint: str | None = None):\n try:\n # Get the admin object\n client = DataAPIClient(environment=environment)\n admin_client = client.get_admin()\n db_admin = admin_client.get_database_admin(api_endpoint, token=token)\n\n # Get the list of embedding providers\n embedding_providers = db_admin.find_embedding_providers()\n\n vectorize_providers_mapping = {}\n # Map the provider display name to the provider key and models\n for provider_key, provider_data in embedding_providers.embedding_providers.items():\n # Get the provider display name and models\n display_name = provider_data.display_name\n models = [model.name for model in provider_data.models]\n\n # Build our mapping\n vectorize_providers_mapping[display_name] = [provider_key, models]\n\n # Sort the resulting dictionary\n return defaultdict(list, dict(sorted(vectorize_providers_mapping.items())))\n except Exception as _: # noqa: BLE001\n return {}\n\n @classmethod\n async def create_database_api(\n cls,\n new_database_name: str,\n cloud_provider: str,\n region: str,\n token: str,\n environment: str | None = None,\n keyspace: str | None = None,\n ):\n client = DataAPIClient(environment=environment)\n\n # Get the admin object\n admin_client = client.get_admin(token=token)\n\n # Get the environment, set to prod if null like\n my_env = environment or \"prod\"\n\n # Raise a value error if name isn't provided\n if not new_database_name:\n msg = \"Database name is required to create a new database.\"\n raise ValueError(msg)\n\n # Call the create database function\n return await admin_client.async_create_database(\n name=new_database_name,\n cloud_provider=cls.map_cloud_providers()[my_env][cloud_provider][\"id\"],\n region=region,\n keyspace=keyspace,\n wait_until_active=False,\n )\n\n @classmethod\n async def create_collection_api(\n cls,\n new_collection_name: str,\n token: str,\n api_endpoint: str,\n environment: str | None = None,\n keyspace: str | None = None,\n dimension: int | None = None,\n embedding_generation_provider: str | None = None,\n embedding_generation_model: str | None = None,\n reranker: str | None = None,\n ):\n # Build vectorize options, if needed\n vectorize_options = None\n if not dimension:\n providers = cls.get_vectorize_providers(token=token, environment=environment, api_endpoint=api_endpoint)\n vectorize_options = VectorServiceOptions(\n provider=providers.get(embedding_generation_provider, [None, []])[0],\n model_name=embedding_generation_model,\n )\n\n # Raise a value error if name isn't provided\n if not new_collection_name:\n msg = \"Collection name is required to create a new collection.\"\n raise ValueError(msg)\n\n # Define the base arguments being passed to the create collection function\n base_args = {\n \"collection_name\": new_collection_name,\n \"token\": token,\n \"api_endpoint\": api_endpoint,\n \"keyspace\": keyspace,\n \"environment\": environment,\n \"embedding_dimension\": dimension,\n \"collection_vector_service_options\": vectorize_options,\n }\n\n # Add optional arguments if the reranker is set\n if reranker:\n # Split the reranker field into a provider a model name\n provider, _ = reranker.split(\"/\")\n base_args[\"collection_rerank\"] = CollectionRerankOptions(\n 
service=RerankServiceOptions(provider=provider, model_name=reranker),\n )\n base_args[\"collection_lexical\"] = CollectionLexicalOptions(analyzer=\"STANDARD\")\n\n _AstraDBCollectionEnvironment(**base_args)\n\n @classmethod\n def get_database_list_static(cls, token: str, environment: str | None = None):\n client = DataAPIClient(environment=environment)\n\n # Get the admin object\n admin_client = client.get_admin(token=token)\n\n # Get the list of databases\n db_list = admin_client.list_databases()\n\n # Generate the api endpoint for each database\n db_info_dict = {}\n for db in db_list:\n try:\n # Get the API endpoint for the database\n api_endpoint = db.regions[0].api_endpoint\n\n # Get the number of collections\n try:\n # Get the number of collections in the database\n num_collections = len(\n client.get_database(\n api_endpoint,\n token=token,\n ).list_collection_names()\n )\n except Exception: # noqa: BLE001\n if db.status != \"PENDING\":\n continue\n num_collections = 0\n\n # Add the database to the dictionary\n db_info_dict[db.name] = {\n \"api_endpoint\": api_endpoint,\n \"keyspaces\": db.keyspaces,\n \"collections\": num_collections,\n \"status\": db.status if db.status != \"ACTIVE\" else None,\n \"org_id\": db.org_id if db.org_id else None,\n }\n except Exception: # noqa: BLE001, S110\n pass\n\n return db_info_dict\n\n def get_database_list(self):\n return self.get_database_list_static(\n token=self.token,\n environment=self.environment,\n )\n\n @classmethod\n def get_api_endpoint_static(\n cls,\n token: str,\n environment: str | None = None,\n api_endpoint: str | None = None,\n database_name: str | None = None,\n ):\n # If the api_endpoint is set, return it\n if api_endpoint:\n return api_endpoint\n\n # Check if the database_name is like a url\n if database_name and database_name.startswith(\"https://\"):\n return database_name\n\n # If the database is not set, nothing we can do.\n if not database_name:\n return None\n\n # Grab the database object\n db = cls.get_database_list_static(token=token, environment=environment).get(database_name)\n if not db:\n return None\n\n # Otherwise, get the URL from the database list\n return db.get(\"api_endpoint\")\n\n def get_api_endpoint(self):\n return self.get_api_endpoint_static(\n token=self.token,\n environment=self.environment,\n api_endpoint=self.api_endpoint,\n database_name=self.database_name,\n )\n\n @classmethod\n def get_database_id_static(cls, api_endpoint: str) -> str | None:\n # Pattern matches standard UUID format: 8-4-4-4-12 hexadecimal characters\n uuid_pattern = r\"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}\"\n match = re.search(uuid_pattern, api_endpoint)\n\n return match.group(0) if match else None\n\n def get_database_id(self):\n return self.get_database_id_static(api_endpoint=self.get_api_endpoint())\n\n def get_keyspace(self):\n keyspace = self.keyspace\n\n if keyspace:\n return keyspace.strip()\n\n return \"default_keyspace\"\n\n def get_database_object(self, api_endpoint: str | None = None):\n try:\n client = DataAPIClient(environment=self.environment)\n\n return client.get_database(\n api_endpoint or self.get_api_endpoint(),\n token=self.token,\n keyspace=self.get_keyspace(),\n )\n except Exception as e:\n msg = f\"Error fetching database object: {e}\"\n raise ValueError(msg) from e\n\n def collection_data(self, collection_name: str, database: Database | None = None):\n try:\n if not database:\n client = DataAPIClient(environment=self.environment)\n\n database = client.get_database(\n 
self.get_api_endpoint(),\n token=self.token,\n keyspace=self.get_keyspace(),\n )\n\n collection = database.get_collection(collection_name)\n\n return collection.estimated_document_count()\n except Exception as e: # noqa: BLE001\n self.log(f\"Error checking collection data: {e}\")\n\n return None\n\n def _initialize_database_options(self):\n try:\n return [\n {\n \"name\": name,\n \"status\": info[\"status\"],\n \"collections\": info[\"collections\"],\n \"api_endpoint\": info[\"api_endpoint\"],\n \"keyspaces\": info[\"keyspaces\"],\n \"org_id\": info[\"org_id\"],\n }\n for name, info in self.get_database_list().items()\n ]\n except Exception as e:\n msg = f\"Error fetching database options: {e}\"\n raise ValueError(msg) from e\n\n @classmethod\n def get_provider_icon(cls, collection: CollectionDescriptor | None = None, provider_name: str | None = None) -> str:\n # Get the provider name from the collection\n provider_name = provider_name or (\n collection.definition.vector.service.provider\n if (\n collection\n and collection.definition\n and collection.definition.vector\n and collection.definition.vector.service\n )\n else None\n )\n\n # If there is no provider, use the vector store icon\n if not provider_name or provider_name.lower() == \"bring your own\":\n return \"vectorstores\"\n\n # Map provider casings\n case_map = {\n \"nvidia\": \"NVIDIA\",\n \"openai\": \"OpenAI\",\n \"amazon bedrock\": \"AmazonBedrockEmbeddings\",\n \"azure openai\": \"AzureOpenAiEmbeddings\",\n \"cohere\": \"Cohere\",\n \"jina ai\": \"JinaAI\",\n \"mistral ai\": \"MistralAI\",\n \"upstage\": \"Upstage\",\n \"voyage ai\": \"VoyageAI\",\n }\n\n # Adjust the casing on some like nvidia\n return case_map[provider_name.lower()] if provider_name.lower() in case_map else provider_name.title()\n\n def _initialize_collection_options(self, api_endpoint: str | None = None):\n # Nothing to generate if we don't have an API endpoint yet\n api_endpoint = api_endpoint or self.get_api_endpoint()\n if not api_endpoint:\n return []\n\n # Retrieve the database object\n database = self.get_database_object(api_endpoint=api_endpoint)\n\n # Get the list of collections\n collection_list = database.list_collections(keyspace=self.get_keyspace())\n\n # Return the list of collections and metadata associated\n return [\n {\n \"name\": col.name,\n \"records\": self.collection_data(collection_name=col.name, database=database),\n \"provider\": (\n col.definition.vector.service.provider\n if col.definition.vector and col.definition.vector.service\n else None\n ),\n \"icon\": self.get_provider_icon(collection=col),\n \"model\": (\n col.definition.vector.service.model_name\n if col.definition.vector and col.definition.vector.service\n else None\n ),\n }\n for col in collection_list\n ]\n\n def reset_provider_options(self, build_config: dict) -> dict:\n \"\"\"Reset provider options and related configurations in the build_config dictionary.\"\"\"\n # Extract template path for cleaner access\n template = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n\n # Get vectorize providers\n vectorize_providers_api = self.get_vectorize_providers(\n token=self.token,\n environment=self.environment,\n api_endpoint=build_config[\"api_endpoint\"][\"value\"],\n )\n\n # Create a new dictionary with \"Bring your own\" first\n vectorize_providers: dict[str, list[list[str]]] = {\"Bring your own\": [[], []]}\n\n # Add the remaining items (only Nvidia) from the original dictionary\n vectorize_providers.update(\n {\n k: 
v\n for k, v in vectorize_providers_api.items()\n if k.lower() in [\"nvidia\"] # TODO: Eventually support more\n }\n )\n\n # Set provider options\n provider_field = \"02_embedding_generation_provider\"\n template[provider_field][\"options\"] = list(vectorize_providers.keys())\n\n # Add metadata for each provider option\n template[provider_field][\"options_metadata\"] = [\n {\"icon\": self.get_provider_icon(provider_name=provider)} for provider in template[provider_field][\"options\"]\n ]\n\n # Get selected embedding provider\n embedding_provider = template[provider_field][\"value\"]\n is_bring_your_own = embedding_provider and embedding_provider == \"Bring your own\"\n\n # Configure embedding model field\n model_field = \"03_embedding_generation_model\"\n template[model_field].update(\n {\n \"options\": vectorize_providers.get(embedding_provider, [[], []])[1],\n \"placeholder\": \"Bring your own\" if is_bring_your_own else None,\n \"readonly\": is_bring_your_own,\n \"required\": not is_bring_your_own,\n \"value\": None,\n }\n )\n\n # If this is a bring your own, set dimensions to 0\n return self.reset_dimension_field(build_config)\n\n def reset_dimension_field(self, build_config: dict) -> dict:\n \"\"\"Reset dimension field options based on provided configuration.\"\"\"\n # Extract template path for cleaner access\n template = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n\n # Get selected embedding model\n provider_field = \"02_embedding_generation_provider\"\n embedding_provider = template[provider_field][\"value\"]\n is_bring_your_own = embedding_provider and embedding_provider == \"Bring your own\"\n\n # Configure dimension field\n dimension_field = \"04_dimension\"\n dimension_value = 1024 if not is_bring_your_own else None # TODO: Dynamically figure this out\n template[dimension_field].update(\n {\n \"placeholder\": dimension_value,\n \"value\": dimension_value,\n \"readonly\": not is_bring_your_own,\n \"required\": is_bring_your_own,\n }\n )\n\n return build_config\n\n def reset_collection_list(self, build_config: dict) -> dict:\n \"\"\"Reset collection list options based on provided configuration.\"\"\"\n # Get collection options\n collection_options = self._initialize_collection_options(api_endpoint=build_config[\"api_endpoint\"][\"value\"])\n # Update collection configuration\n collection_config = build_config[\"collection_name\"]\n collection_config.update(\n {\n \"options\": [col[\"name\"] for col in collection_options],\n \"options_metadata\": [{k: v for k, v in col.items() if k != \"name\"} for col in collection_options],\n }\n )\n\n # Reset selected collection if not in options\n if collection_config[\"value\"] not in collection_config[\"options\"]:\n collection_config[\"value\"] = \"\"\n\n # Set advanced status based on database selection\n collection_config[\"show\"] = bool(build_config[\"database_name\"][\"value\"])\n\n return build_config\n\n def reset_database_list(self, build_config: dict) -> dict:\n \"\"\"Reset database list options and related configurations.\"\"\"\n # Get database options\n database_options = self._initialize_database_options()\n\n # Update cloud provider options\n env = self.environment\n template = build_config[\"database_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n template[\"02_cloud_provider\"][\"options\"] = list(self.map_cloud_providers()[env].keys())\n\n # Update database configuration\n database_config = build_config[\"database_name\"]\n 
database_config.update(\n {\n \"options\": [db[\"name\"] for db in database_options],\n \"options_metadata\": [{k: v for k, v in db.items() if k != \"name\"} for db in database_options],\n }\n )\n\n # Reset selections if value not in options\n if database_config[\"value\"] not in database_config[\"options\"]:\n database_config[\"value\"] = \"\"\n build_config[\"api_endpoint\"][\"value\"] = \"\"\n build_config[\"collection_name\"][\"show\"] = False\n\n # Set advanced status based on token presence\n database_config[\"show\"] = bool(build_config[\"token\"][\"value\"])\n\n return build_config\n\n def reset_build_config(self, build_config: dict) -> dict:\n \"\"\"Reset all build configuration options to default empty state.\"\"\"\n # Reset database configuration\n database_config = build_config[\"database_name\"]\n database_config.update({\"options\": [], \"options_metadata\": [], \"value\": \"\", \"show\": False})\n build_config[\"api_endpoint\"][\"value\"] = \"\"\n\n # Reset collection configuration\n collection_config = build_config[\"collection_name\"]\n collection_config.update({\"options\": [], \"options_metadata\": [], \"value\": \"\", \"show\": False})\n\n return build_config\n\n def _handle_hybrid_search_options(self, build_config: dict) -> dict:\n \"\"\"Set hybrid search options in the build configuration.\"\"\"\n # Detect what hybrid options are available\n # Get the admin object\n client = DataAPIClient(environment=self.environment)\n admin_client = client.get_admin()\n db_admin = admin_client.get_database_admin(self.get_api_endpoint(), token=self.token)\n\n # We will try to get the reranking providers to see if its hybrid emabled\n try:\n providers = db_admin.find_reranking_providers()\n build_config[\"reranker\"][\"options\"] = [\n model.name for provider_data in providers.reranking_providers.values() for model in provider_data.models\n ]\n build_config[\"reranker\"][\"options_metadata\"] = [\n {\"icon\": self.get_provider_icon(provider_name=model.name.split(\"/\")[0])}\n for provider in providers.reranking_providers.values()\n for model in provider.models\n ]\n build_config[\"reranker\"][\"value\"] = build_config[\"reranker\"][\"options\"][0]\n\n # Set the default search field to hybrid search\n build_config[\"search_method\"][\"show\"] = True\n build_config[\"search_method\"][\"options\"] = [\"Hybrid Search\", \"Vector Search\"]\n build_config[\"search_method\"][\"value\"] = \"Hybrid Search\"\n except Exception as _: # noqa: BLE001\n build_config[\"reranker\"][\"options\"] = []\n build_config[\"reranker\"][\"options_metadata\"] = []\n\n # Set the default search field to vector search\n build_config[\"search_method\"][\"show\"] = False\n build_config[\"search_method\"][\"options\"] = [\"Vector Search\"]\n build_config[\"search_method\"][\"value\"] = \"Vector Search\"\n\n # Set reranker and lexical terms options based on search method\n build_config[\"reranker\"][\"toggle_value\"] = True\n build_config[\"reranker\"][\"show\"] = build_config[\"search_method\"][\"value\"] == \"Hybrid Search\"\n build_config[\"reranker\"][\"toggle_disable\"] = build_config[\"search_method\"][\"value\"] == \"Hybrid Search\"\n if build_config[\"reranker\"][\"show\"]:\n build_config[\"search_type\"][\"value\"] = \"Similarity\"\n\n return build_config\n\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Update build configuration based on field name and value.\"\"\"\n # Early return if no token provided\n if not self.token:\n return 
self.reset_build_config(build_config)\n\n # Database creation callback\n if field_name == \"database_name\" and isinstance(field_value, dict):\n if \"01_new_database_name\" in field_value:\n await self._create_new_database(build_config, field_value)\n return self.reset_collection_list(build_config)\n return self._update_cloud_regions(build_config, field_value)\n\n # Collection creation callback\n if field_name == \"collection_name\" and isinstance(field_value, dict):\n # Case 1: New collection creation\n if \"01_new_collection_name\" in field_value:\n await self._create_new_collection(build_config, field_value)\n return build_config\n\n # Case 2: Update embedding provider options\n if \"02_embedding_generation_provider\" in field_value:\n return self.reset_provider_options(build_config)\n\n # Case 3: Update dimension field\n if \"03_embedding_generation_model\" in field_value:\n return self.reset_dimension_field(build_config)\n\n # Initial execution or token/environment change\n first_run = field_name == \"collection_name\" and not field_value and not build_config[\"database_name\"][\"options\"]\n if first_run or field_name in {\"token\", \"environment\"}:\n return self.reset_database_list(build_config)\n\n # Database selection change\n if field_name == \"database_name\" and not isinstance(field_value, dict):\n return self._handle_database_selection(build_config, field_value)\n\n # Keyspace selection change\n if field_name == \"keyspace\":\n return self.reset_collection_list(build_config)\n\n # Collection selection change\n if field_name == \"collection_name\" and not isinstance(field_value, dict):\n return self._handle_collection_selection(build_config, field_value)\n\n # Search method selection change\n if field_name == \"search_method\":\n is_vector_search = field_value == \"Vector Search\"\n is_autodetect = build_config[\"autodetect_collection\"][\"value\"]\n\n # Configure lexical terms (same for both cases)\n build_config[\"lexical_terms\"][\"show\"] = not is_vector_search\n build_config[\"lexical_terms\"][\"value\"] = \"\" if is_vector_search else build_config[\"lexical_terms\"][\"value\"]\n\n # Disable reranker disabling if hybrid search is selected\n build_config[\"reranker\"][\"toggle_disable\"] = not is_vector_search\n build_config[\"reranker\"][\"toggle_value\"] = True\n build_config[\"reranker\"][\"value\"] = build_config[\"reranker\"][\"options\"][0]\n\n # Toggle search type and score threshold based on search method\n build_config[\"search_type\"][\"show\"] = is_vector_search\n build_config[\"search_score_threshold\"][\"show\"] = is_vector_search\n\n # Make sure the search_type is set to \"Similarity\"\n if not is_vector_search or is_autodetect:\n build_config[\"search_type\"][\"value\"] = \"Similarity\"\n\n return build_config\n\n async def _create_new_database(self, build_config: dict, field_value: dict) -> None:\n \"\"\"Create a new database and update build config options.\"\"\"\n try:\n await self.create_database_api(\n new_database_name=field_value[\"01_new_database_name\"],\n token=self.token,\n keyspace=self.get_keyspace(),\n environment=self.environment,\n cloud_provider=field_value[\"02_cloud_provider\"],\n region=field_value[\"03_region\"],\n )\n except Exception as e:\n msg = f\"Error creating database: {e}\"\n raise ValueError(msg) from e\n\n build_config[\"database_name\"][\"options\"].append(field_value[\"01_new_database_name\"])\n build_config[\"database_name\"][\"options_metadata\"].append(\n {\n \"status\": \"PENDING\",\n \"collections\": 0,\n 
\"api_endpoint\": None,\n \"keyspaces\": [self.get_keyspace()],\n \"org_id\": None,\n }\n )\n\n def _update_cloud_regions(self, build_config: dict, field_value: dict) -> dict:\n \"\"\"Update cloud provider regions in build config.\"\"\"\n env = self.environment\n cloud_provider = field_value[\"02_cloud_provider\"]\n\n # Update the region options based on the selected cloud provider\n template = build_config[\"database_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n template[\"03_region\"][\"options\"] = self.map_cloud_providers()[env][cloud_provider][\"regions\"]\n\n # Reset the the 03_region value if it's not in the new options\n if template[\"03_region\"][\"value\"] not in template[\"03_region\"][\"options\"]:\n template[\"03_region\"][\"value\"] = None\n\n return build_config\n\n async def _create_new_collection(self, build_config: dict, field_value: dict) -> None:\n \"\"\"Create a new collection and update build config options.\"\"\"\n embedding_provider = field_value.get(\"02_embedding_generation_provider\")\n try:\n await self.create_collection_api(\n new_collection_name=field_value[\"01_new_collection_name\"],\n token=self.token,\n api_endpoint=build_config[\"api_endpoint\"][\"value\"],\n environment=self.environment,\n keyspace=self.get_keyspace(),\n dimension=field_value.get(\"04_dimension\") if embedding_provider == \"Bring your own\" else None,\n embedding_generation_provider=embedding_provider,\n embedding_generation_model=field_value.get(\"03_embedding_generation_model\"),\n reranker=self.reranker,\n )\n except Exception as e:\n msg = f\"Error creating collection: {e}\"\n raise ValueError(msg) from e\n\n provider = embedding_provider.lower() if embedding_provider and embedding_provider != \"Bring your own\" else None\n build_config[\"collection_name\"].update(\n {\n \"value\": field_value[\"01_new_collection_name\"],\n \"options\": build_config[\"collection_name\"][\"options\"] + [field_value[\"01_new_collection_name\"]],\n }\n )\n build_config[\"embedding_model\"][\"show\"] = not bool(provider)\n build_config[\"embedding_model\"][\"required\"] = not bool(provider)\n build_config[\"collection_name\"][\"options_metadata\"].append(\n {\n \"records\": 0,\n \"provider\": provider,\n \"icon\": self.get_provider_icon(provider_name=provider),\n \"model\": field_value.get(\"03_embedding_generation_model\"),\n }\n )\n\n # Make sure we always show the reranker options if the collection is hybrid enabled\n # And right now they always are\n build_config[\"lexical_terms\"][\"show\"] = True\n\n def _handle_database_selection(self, build_config: dict, field_value: str) -> dict:\n \"\"\"Handle database selection and update related configurations.\"\"\"\n build_config = self.reset_database_list(build_config)\n\n # Reset collection list if database selection changes\n if field_value not in build_config[\"database_name\"][\"options\"]:\n build_config[\"database_name\"][\"value\"] = \"\"\n return build_config\n\n # Get the api endpoint for the selected database\n index = build_config[\"database_name\"][\"options\"].index(field_value)\n build_config[\"api_endpoint\"][\"value\"] = build_config[\"database_name\"][\"options_metadata\"][index][\"api_endpoint\"]\n\n # Get the org_id for the selected database\n org_id = build_config[\"database_name\"][\"options_metadata\"][index][\"org_id\"]\n if not org_id:\n return build_config\n\n # Update the list of keyspaces based on the db info\n build_config[\"keyspace\"][\"options\"] = 
build_config[\"database_name\"][\"options_metadata\"][index][\"keyspaces\"]\n build_config[\"keyspace\"][\"value\"] = (\n build_config[\"keyspace\"][\"options\"] and build_config[\"keyspace\"][\"options\"][0]\n if build_config[\"keyspace\"][\"value\"] not in build_config[\"keyspace\"][\"options\"]\n else build_config[\"keyspace\"][\"value\"]\n )\n\n # Get the database id for the selected database\n db_id = self.get_database_id_static(api_endpoint=build_config[\"api_endpoint\"][\"value\"])\n keyspace = self.get_keyspace()\n\n # Update the helper text for the embedding provider field\n template = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n template[\"02_embedding_generation_provider\"][\"helper_text\"] = (\n \"To create collections with more embedding provider options, go to \"\n f''\n \"your database in Astra DB.\"\n )\n\n # Reset provider options\n build_config = self.reset_provider_options(build_config)\n\n # Handle hybrid search options\n build_config = self._handle_hybrid_search_options(build_config)\n\n return self.reset_collection_list(build_config)\n\n def _handle_collection_selection(self, build_config: dict, field_value: str) -> dict:\n \"\"\"Handle collection selection and update embedding options.\"\"\"\n build_config[\"autodetect_collection\"][\"value\"] = True\n build_config = self.reset_collection_list(build_config)\n\n # Reset embedding model if collection selection changes\n if field_value and field_value not in build_config[\"collection_name\"][\"options\"]:\n build_config[\"collection_name\"][\"options\"].append(field_value)\n build_config[\"collection_name\"][\"options_metadata\"].append(\n {\n \"records\": 0,\n \"provider\": None,\n \"icon\": \"vectorstores\",\n \"model\": None,\n }\n )\n build_config[\"autodetect_collection\"][\"value\"] = False\n\n if not field_value:\n return build_config\n\n # Get the selected collection index\n index = build_config[\"collection_name\"][\"options\"].index(field_value)\n\n # Set the provider of the selected collection\n provider = build_config[\"collection_name\"][\"options_metadata\"][index][\"provider\"]\n build_config[\"embedding_model\"][\"show\"] = not bool(provider)\n build_config[\"embedding_model\"][\"required\"] = not bool(provider)\n\n # Grab the collection object\n database = self.get_database_object(api_endpoint=build_config[\"api_endpoint\"][\"value\"])\n collection = database.get_collection(\n name=field_value,\n keyspace=build_config[\"keyspace\"][\"value\"],\n )\n\n # Check if hybrid and lexical are enabled\n col_options = collection.options()\n hyb_enabled = col_options.rerank and col_options.rerank.enabled\n lex_enabled = col_options.lexical and col_options.lexical.enabled\n user_hyb_enabled = build_config[\"search_method\"][\"value\"] == \"Hybrid Search\"\n\n # Show lexical terms if the collection is hybrid enabled\n build_config[\"lexical_terms\"][\"show\"] = hyb_enabled and lex_enabled and user_hyb_enabled\n\n return build_config\n\n @check_cached_vector_store\n def build_vector_store(self):\n try:\n from langchain_astradb import AstraDBVectorStore\n except ImportError as e:\n msg = (\n \"Could not import langchain Astra DB integration package. 
\"\n \"Please install it with `pip install langchain-astradb`.\"\n )\n raise ImportError(msg) from e\n\n # Get the embedding model and additional params\n embedding_params = {\"embedding\": self.embedding_model} if self.embedding_model else {}\n\n # Get the additional parameters\n additional_params = self.astradb_vectorstore_kwargs or {}\n\n # Get Langflow version and platform information\n __version__ = get_version_info()[\"version\"]\n langflow_prefix = \"\"\n # if os.getenv(\"AWS_EXECUTION_ENV\") == \"AWS_ECS_FARGATE\": # TODO: More precise way of detecting\n # langflow_prefix = \"ds-\"\n\n # Get the database object\n database = self.get_database_object()\n autodetect = self.collection_name in database.list_collection_names() and self.autodetect_collection\n\n # Bundle up the auto-detect parameters\n autodetect_params = {\n \"autodetect_collection\": autodetect,\n \"content_field\": (\n self.content_field\n if self.content_field and embedding_params\n else (\n \"page_content\"\n if embedding_params\n and self.collection_data(collection_name=self.collection_name, database=database) == 0\n else None\n )\n ),\n \"ignore_invalid_documents\": self.ignore_invalid_documents,\n }\n\n # Choose HybridSearchMode based on the selected param\n hybrid_search_mode = HybridSearchMode.DEFAULT if self.search_method == \"Hybrid Search\" else HybridSearchMode.OFF\n\n # Attempt to build the Vector Store object\n try:\n vector_store = AstraDBVectorStore(\n # Astra DB Authentication Parameters\n token=self.token,\n api_endpoint=database.api_endpoint,\n namespace=database.keyspace,\n collection_name=self.collection_name,\n environment=self.environment,\n # Hybrid Search Parameters\n hybrid_search=hybrid_search_mode,\n # Astra DB Usage Tracking Parameters\n ext_callers=[(f\"{langflow_prefix}langflow\", __version__)],\n # Astra DB Vector Store Parameters\n **autodetect_params,\n **embedding_params,\n **additional_params,\n )\n except Exception as e:\n msg = f\"Error initializing AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n\n # Add documents to the vector store\n self._add_documents_to_vector_store(vector_store)\n\n return vector_store\n\n def _add_documents_to_vector_store(self, vector_store) -> None:\n self.ingest_data = self._prepare_ingest_data()\n\n documents = []\n for _input in self.ingest_data or []:\n if isinstance(_input, Data):\n documents.append(_input.to_lc_document())\n else:\n msg = \"Vector Store Inputs must be Data objects.\"\n raise TypeError(msg)\n\n if documents and self.deletion_field:\n self.log(f\"Deleting documents where {self.deletion_field}\")\n try:\n database = self.get_database_object()\n collection = database.get_collection(self.collection_name, keyspace=database.keyspace)\n delete_values = list({doc.metadata[self.deletion_field] for doc in documents})\n self.log(f\"Deleting documents where {self.deletion_field} matches {delete_values}.\")\n collection.delete_many({f\"metadata.{self.deletion_field}\": {\"$in\": delete_values}})\n except Exception as e:\n msg = f\"Error deleting documents from AstraDBVectorStore based on '{self.deletion_field}': {e}\"\n raise ValueError(msg) from e\n\n if documents:\n self.log(f\"Adding {len(documents)} documents to the Vector Store.\")\n try:\n vector_store.add_documents(documents)\n except Exception as e:\n msg = f\"Error adding documents to AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n else:\n self.log(\"No documents to add to the Vector Store.\")\n\n def _map_search_type(self) -> str:\n search_type_mapping = {\n 
\"Similarity with score threshold\": \"similarity_score_threshold\",\n \"MMR (Max Marginal Relevance)\": \"mmr\",\n }\n\n return search_type_mapping.get(self.search_type, \"similarity\")\n\n def _build_search_args(self):\n # Clean up the search query\n query = self.search_query if isinstance(self.search_query, str) and self.search_query.strip() else None\n lexical_terms = self.lexical_terms or None\n\n # Check if we have a search query, and if so set the args\n if query:\n args = {\n \"query\": query,\n \"search_type\": self._map_search_type(),\n \"k\": self.number_of_results,\n \"score_threshold\": self.search_score_threshold,\n \"lexical_query\": lexical_terms,\n }\n elif self.advanced_search_filter:\n args = {\n \"n\": self.number_of_results,\n }\n else:\n return {}\n\n filter_arg = self.advanced_search_filter or {}\n if filter_arg:\n args[\"filter\"] = filter_arg\n\n return args\n\n def search_documents(self, vector_store=None) -> list[Data]:\n vector_store = vector_store or self.build_vector_store()\n\n self.log(f\"Search input: {self.search_query}\")\n self.log(f\"Search type: {self.search_type}\")\n self.log(f\"Number of results: {self.number_of_results}\")\n self.log(f\"store.hybrid_search: {vector_store.hybrid_search}\")\n self.log(f\"Lexical terms: {self.lexical_terms}\")\n self.log(f\"Reranker: {self.reranker}\")\n\n try:\n search_args = self._build_search_args()\n except Exception as e:\n msg = f\"Error in AstraDBVectorStore._build_search_args: {e}\"\n raise ValueError(msg) from e\n\n if not search_args:\n self.log(\"No search input or filters provided. Skipping search.\")\n return []\n\n docs = []\n search_method = \"search\" if \"query\" in search_args else \"metadata_search\"\n\n try:\n self.log(f\"Calling vector_store.{search_method} with args: {search_args}\")\n docs = getattr(vector_store, search_method)(**search_args)\n except Exception as e:\n msg = f\"Error performing {search_method} in AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n\n self.log(f\"Retrieved documents: {len(docs)}\")\n\n data = docs_to_data(docs)\n self.log(f\"Converted documents to data: {len(data)}\")\n self.status = data\n\n return data\n\n def get_retriever_kwargs(self):\n search_args = self._build_search_args()\n\n return {\n \"search_type\": self._map_search_type(),\n \"search_kwargs\": search_args,\n }\n"
},
"collection_name": {
"_input_type": "DropdownInput",
@@ -4298,7 +4298,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "import re\nfrom collections import defaultdict\nfrom dataclasses import asdict, dataclass, field\n\nfrom astrapy import DataAPIClient, Database\nfrom astrapy.data.info.reranking import RerankServiceOptions\nfrom astrapy.info import CollectionDescriptor, CollectionLexicalOptions, CollectionRerankOptions\nfrom langchain_astradb import AstraDBVectorStore, VectorServiceOptions\nfrom langchain_astradb.utils.astradb import HybridSearchMode, _AstraDBCollectionEnvironment\n\nfrom langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom langflow.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom langflow.helpers import docs_to_data\nfrom langflow.inputs import FloatInput, NestedDictInput\nfrom langflow.io import (\n BoolInput,\n DropdownInput,\n HandleInput,\n IntInput,\n QueryInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema import Data\nfrom langflow.utils.version import get_version_info\n\n\n@vector_store_connection\nclass AstraDBVectorStoreComponent(LCVectorStoreComponent):\n display_name: str = \"Astra DB\"\n description: str = \"Ingest and search documents in Astra DB\"\n documentation: str = \"https://docs.datastax.com/en/langflow/astra-components.html\"\n name = \"AstraDB\"\n icon: str = \"AstraDB\"\n\n _cached_vector_store: AstraDBVectorStore | None = None\n\n @dataclass\n class NewDatabaseInput:\n functionality: str = \"create\"\n fields: dict[str, dict] = field(\n default_factory=lambda: {\n \"data\": {\n \"node\": {\n \"name\": \"create_database\",\n \"description\": \"Please allow several minutes for creation to complete.\",\n \"display_name\": \"Create new database\",\n \"field_order\": [\"01_new_database_name\", \"02_cloud_provider\", \"03_region\"],\n \"template\": {\n \"01_new_database_name\": StrInput(\n name=\"new_database_name\",\n display_name=\"Name\",\n info=\"Name of the new database to create in Astra DB.\",\n required=True,\n ),\n \"02_cloud_provider\": DropdownInput(\n name=\"cloud_provider\",\n display_name=\"Cloud provider\",\n info=\"Cloud provider for the new database.\",\n options=[],\n required=True,\n real_time_refresh=True,\n ),\n \"03_region\": DropdownInput(\n name=\"region\",\n display_name=\"Region\",\n info=\"Region for the new database.\",\n options=[],\n required=True,\n ),\n },\n },\n }\n }\n )\n\n @dataclass\n class NewCollectionInput:\n functionality: str = \"create\"\n fields: dict[str, dict] = field(\n default_factory=lambda: {\n \"data\": {\n \"node\": {\n \"name\": \"create_collection\",\n \"description\": \"Please allow several seconds for creation to complete.\",\n \"display_name\": \"Create new collection\",\n \"field_order\": [\n \"01_new_collection_name\",\n \"02_embedding_generation_provider\",\n \"03_embedding_generation_model\",\n \"04_dimension\",\n ],\n \"template\": {\n \"01_new_collection_name\": StrInput(\n name=\"new_collection_name\",\n display_name=\"Name\",\n info=\"Name of the new collection to create in Astra DB.\",\n required=True,\n ),\n \"02_embedding_generation_provider\": DropdownInput(\n name=\"embedding_generation_provider\",\n display_name=\"Embedding generation method\",\n info=\"Provider to use for generating embeddings.\",\n helper_text=(\n \"To create collections with more embedding provider options, go to \"\n 'your database in Astra DB'\n ),\n real_time_refresh=True,\n required=True,\n options=[],\n ),\n \"03_embedding_generation_model\": DropdownInput(\n name=\"embedding_generation_model\",\n display_name=\"Embedding 
model\",\n info=\"Model to use for generating embeddings.\",\n real_time_refresh=True,\n options=[],\n ),\n \"04_dimension\": IntInput(\n name=\"dimension\",\n display_name=\"Dimensions\",\n info=\"Dimensions of the embeddings to generate.\",\n value=None,\n ),\n },\n },\n }\n }\n )\n\n inputs = [\n SecretStrInput(\n name=\"token\",\n display_name=\"Astra DB Application Token\",\n info=\"Authentication token for accessing Astra DB.\",\n value=\"ASTRA_DB_APPLICATION_TOKEN\",\n required=True,\n real_time_refresh=True,\n input_types=[],\n ),\n DropdownInput(\n name=\"environment\",\n display_name=\"Environment\",\n info=\"The environment for the Astra DB API Endpoint.\",\n options=[\"prod\", \"test\", \"dev\"],\n value=\"prod\",\n advanced=True,\n real_time_refresh=True,\n combobox=True,\n ),\n DropdownInput(\n name=\"database_name\",\n display_name=\"Database\",\n info=\"The Database name for the Astra DB instance.\",\n required=True,\n refresh_button=True,\n real_time_refresh=True,\n dialog_inputs=asdict(NewDatabaseInput()),\n combobox=True,\n ),\n StrInput(\n name=\"api_endpoint\",\n display_name=\"Astra DB API Endpoint\",\n info=\"The API Endpoint for the Astra DB instance. Supercedes database selection.\",\n show=False,\n ),\n DropdownInput(\n name=\"keyspace\",\n display_name=\"Keyspace\",\n info=\"Optional keyspace within Astra DB to use for the collection.\",\n advanced=True,\n options=[],\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"collection_name\",\n display_name=\"Collection\",\n info=\"The name of the collection within Astra DB where the vectors will be stored.\",\n required=True,\n refresh_button=True,\n real_time_refresh=True,\n dialog_inputs=asdict(NewCollectionInput()),\n combobox=True,\n show=False,\n ),\n HandleInput(\n name=\"embedding_model\",\n display_name=\"Embedding Model\",\n input_types=[\"Embeddings\"],\n info=\"Specify the Embedding Model. Not required for Astra Vectorize collections.\",\n required=False,\n show=False,\n ),\n *LCVectorStoreComponent.inputs,\n DropdownInput(\n name=\"search_method\",\n display_name=\"Search Method\",\n info=(\n \"Determine how your content is matched: Vector finds semantic similarity, \"\n \"and Hybrid Search (suggested) combines both approaches \"\n \"with a reranker.\"\n ),\n options=[\"Hybrid Search\", \"Vector Search\"], # TODO: Restore Lexical Search?\n options_metadata=[{\"icon\": \"SearchHybrid\"}, {\"icon\": \"SearchVector\"}],\n value=\"Vector Search\",\n advanced=True,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"reranker\",\n display_name=\"Reranker\",\n info=\"Post-retrieval model that re-scores results for optimal relevance ranking.\",\n show=False,\n toggle=True,\n ),\n QueryInput(\n name=\"lexical_terms\",\n display_name=\"Lexical Terms\",\n info=\"Add additional terms/keywords to augment search precision.\",\n placeholder=\"Enter terms to search...\",\n separator=\" \",\n show=False,\n value=\"\",\n advanced=True,\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Number of Search Results\",\n info=\"Number of search results to return.\",\n advanced=True,\n value=4,\n ),\n DropdownInput(\n name=\"search_type\",\n display_name=\"Search Type\",\n info=\"Search type to use\",\n options=[\"Similarity\", \"Similarity with score threshold\", \"MMR (Max Marginal Relevance)\"],\n value=\"Similarity\",\n advanced=True,\n ),\n FloatInput(\n name=\"search_score_threshold\",\n display_name=\"Search Score Threshold\",\n info=\"Minimum similarity score threshold for search results. 
\"\n \"(when using 'Similarity with score threshold')\",\n value=0,\n advanced=True,\n ),\n NestedDictInput(\n name=\"advanced_search_filter\",\n display_name=\"Search Metadata Filter\",\n info=\"Optional dictionary of filters to apply to the search query.\",\n advanced=True,\n ),\n BoolInput(\n name=\"autodetect_collection\",\n display_name=\"Autodetect Collection\",\n info=\"Boolean flag to determine whether to autodetect the collection.\",\n advanced=True,\n value=True,\n ),\n StrInput(\n name=\"content_field\",\n display_name=\"Content Field\",\n info=\"Field to use as the text content field for the vector store.\",\n advanced=True,\n ),\n StrInput(\n name=\"deletion_field\",\n display_name=\"Deletion Based On Field\",\n info=\"When this parameter is provided, documents in the target collection with \"\n \"metadata field values matching the input metadata field value will be deleted \"\n \"before new data is loaded.\",\n advanced=True,\n ),\n BoolInput(\n name=\"ignore_invalid_documents\",\n display_name=\"Ignore Invalid Documents\",\n info=\"Boolean flag to determine whether to ignore invalid documents at runtime.\",\n advanced=True,\n ),\n NestedDictInput(\n name=\"astradb_vectorstore_kwargs\",\n display_name=\"AstraDBVectorStore Parameters\",\n info=\"Optional dictionary of additional parameters for the AstraDBVectorStore.\",\n advanced=True,\n ),\n ]\n\n @classmethod\n def map_cloud_providers(cls):\n # TODO: Programmatically fetch the regions for each cloud provider\n return {\n \"dev\": {\n \"Amazon Web Services\": {\n \"id\": \"aws\",\n \"regions\": [\"us-west-2\"],\n },\n \"Google Cloud Platform\": {\n \"id\": \"gcp\",\n \"regions\": [\"us-central1\", \"europe-west4\"],\n },\n },\n \"test\": {\n \"Google Cloud Platform\": {\n \"id\": \"gcp\",\n \"regions\": [\"us-central1\"],\n },\n },\n \"prod\": {\n \"Amazon Web Services\": {\n \"id\": \"aws\",\n \"regions\": [\"us-east-2\", \"ap-south-1\", \"eu-west-1\"],\n },\n \"Google Cloud Platform\": {\n \"id\": \"gcp\",\n \"regions\": [\"us-east1\"],\n },\n \"Microsoft Azure\": {\n \"id\": \"azure\",\n \"regions\": [\"westus3\"],\n },\n },\n }\n\n @classmethod\n def get_vectorize_providers(cls, token: str, environment: str | None = None, api_endpoint: str | None = None):\n try:\n # Get the admin object\n client = DataAPIClient(environment=environment)\n admin_client = client.get_admin()\n db_admin = admin_client.get_database_admin(api_endpoint, token=token)\n\n # Get the list of embedding providers\n embedding_providers = db_admin.find_embedding_providers()\n\n vectorize_providers_mapping = {}\n # Map the provider display name to the provider key and models\n for provider_key, provider_data in embedding_providers.embedding_providers.items():\n # Get the provider display name and models\n display_name = provider_data.display_name\n models = [model.name for model in provider_data.models]\n\n # Build our mapping\n vectorize_providers_mapping[display_name] = [provider_key, models]\n\n # Sort the resulting dictionary\n return defaultdict(list, dict(sorted(vectorize_providers_mapping.items())))\n except Exception as _: # noqa: BLE001\n return {}\n\n @classmethod\n async def create_database_api(\n cls,\n new_database_name: str,\n cloud_provider: str,\n region: str,\n token: str,\n environment: str | None = None,\n keyspace: str | None = None,\n ):\n client = DataAPIClient(environment=environment)\n\n # Get the admin object\n admin_client = client.get_admin(token=token)\n\n # Get the environment, set to prod if null like\n my_env = environment 
or \"prod\"\n\n # Raise a value error if name isn't provided\n if not new_database_name:\n msg = \"Database name is required to create a new database.\"\n raise ValueError(msg)\n\n # Call the create database function\n return await admin_client.async_create_database(\n name=new_database_name,\n cloud_provider=cls.map_cloud_providers()[my_env][cloud_provider][\"id\"],\n region=region,\n keyspace=keyspace,\n wait_until_active=False,\n )\n\n @classmethod\n async def create_collection_api(\n cls,\n new_collection_name: str,\n token: str,\n api_endpoint: str,\n environment: str | None = None,\n keyspace: str | None = None,\n dimension: int | None = None,\n embedding_generation_provider: str | None = None,\n embedding_generation_model: str | None = None,\n reranker: str | None = None,\n ):\n # Build vectorize options, if needed\n vectorize_options = None\n if not dimension:\n providers = cls.get_vectorize_providers(token=token, environment=environment, api_endpoint=api_endpoint)\n vectorize_options = VectorServiceOptions(\n provider=providers.get(embedding_generation_provider, [None, []])[0],\n model_name=embedding_generation_model,\n )\n\n # Raise a value error if name isn't provided\n if not new_collection_name:\n msg = \"Collection name is required to create a new collection.\"\n raise ValueError(msg)\n\n # Define the base arguments being passed to the create collection function\n base_args = {\n \"collection_name\": new_collection_name,\n \"token\": token,\n \"api_endpoint\": api_endpoint,\n \"keyspace\": keyspace,\n \"environment\": environment,\n \"embedding_dimension\": dimension,\n \"collection_vector_service_options\": vectorize_options,\n }\n\n # Add optional arguments if the reranker is set\n if reranker:\n # Split the reranker field into a provider a model name\n provider, _ = reranker.split(\"/\")\n base_args[\"collection_rerank\"] = CollectionRerankOptions(\n service=RerankServiceOptions(provider=provider, model_name=reranker),\n )\n base_args[\"collection_lexical\"] = CollectionLexicalOptions(analyzer=\"STANDARD\")\n\n _AstraDBCollectionEnvironment(**base_args)\n\n @classmethod\n def get_database_list_static(cls, token: str, environment: str | None = None):\n client = DataAPIClient(environment=environment)\n\n # Get the admin object\n admin_client = client.get_admin(token=token)\n\n # Get the list of databases\n db_list = admin_client.list_databases()\n\n # Generate the api endpoint for each database\n db_info_dict = {}\n for db in db_list:\n try:\n # Get the API endpoint for the database\n api_endpoint = db.regions[0].api_endpoint\n\n # Get the number of collections\n try:\n # Get the number of collections in the database\n num_collections = len(\n client.get_database(\n api_endpoint,\n token=token,\n ).list_collection_names()\n )\n except Exception: # noqa: BLE001\n if db.status != \"PENDING\":\n continue\n num_collections = 0\n\n # Add the database to the dictionary\n db_info_dict[db.name] = {\n \"api_endpoint\": api_endpoint,\n \"keyspaces\": db.keyspaces,\n \"collections\": num_collections,\n \"status\": db.status if db.status != \"ACTIVE\" else None,\n \"org_id\": db.org_id if db.org_id else None,\n }\n except Exception: # noqa: BLE001, S110\n pass\n\n return db_info_dict\n\n def get_database_list(self):\n return self.get_database_list_static(\n token=self.token,\n environment=self.environment,\n )\n\n @classmethod\n def get_api_endpoint_static(\n cls,\n token: str,\n environment: str | None = None,\n api_endpoint: str | None = None,\n database_name: str | None = None,\n 
):\n # If the api_endpoint is set, return it\n if api_endpoint:\n return api_endpoint\n\n # Check if the database_name is like a url\n if database_name and database_name.startswith(\"https://\"):\n return database_name\n\n # If the database is not set, nothing we can do.\n if not database_name:\n return None\n\n # Grab the database object\n db = cls.get_database_list_static(token=token, environment=environment).get(database_name)\n if not db:\n return None\n\n # Otherwise, get the URL from the database list\n return db.get(\"api_endpoint\")\n\n def get_api_endpoint(self):\n return self.get_api_endpoint_static(\n token=self.token,\n environment=self.environment,\n api_endpoint=self.api_endpoint,\n database_name=self.database_name,\n )\n\n @classmethod\n def get_database_id_static(cls, api_endpoint: str) -> str | None:\n # Pattern matches standard UUID format: 8-4-4-4-12 hexadecimal characters\n uuid_pattern = r\"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}\"\n match = re.search(uuid_pattern, api_endpoint)\n\n return match.group(0) if match else None\n\n def get_database_id(self):\n return self.get_database_id_static(api_endpoint=self.get_api_endpoint())\n\n def get_keyspace(self):\n keyspace = self.keyspace\n\n if keyspace:\n return keyspace.strip()\n\n return \"default_keyspace\"\n\n def get_database_object(self, api_endpoint: str | None = None):\n try:\n client = DataAPIClient(environment=self.environment)\n\n return client.get_database(\n api_endpoint or self.get_api_endpoint(),\n token=self.token,\n keyspace=self.get_keyspace(),\n )\n except Exception as e:\n msg = f\"Error fetching database object: {e}\"\n raise ValueError(msg) from e\n\n def collection_data(self, collection_name: str, database: Database | None = None):\n try:\n if not database:\n client = DataAPIClient(environment=self.environment)\n\n database = client.get_database(\n self.get_api_endpoint(),\n token=self.token,\n keyspace=self.get_keyspace(),\n )\n\n collection = database.get_collection(collection_name)\n\n return collection.estimated_document_count()\n except Exception as e: # noqa: BLE001\n self.log(f\"Error checking collection data: {e}\")\n\n return None\n\n def _initialize_database_options(self):\n try:\n return [\n {\n \"name\": name,\n \"status\": info[\"status\"],\n \"collections\": info[\"collections\"],\n \"api_endpoint\": info[\"api_endpoint\"],\n \"keyspaces\": info[\"keyspaces\"],\n \"org_id\": info[\"org_id\"],\n }\n for name, info in self.get_database_list().items()\n ]\n except Exception as e:\n msg = f\"Error fetching database options: {e}\"\n raise ValueError(msg) from e\n\n @classmethod\n def get_provider_icon(cls, collection: CollectionDescriptor | None = None, provider_name: str | None = None) -> str:\n # Get the provider name from the collection\n provider_name = provider_name or (\n collection.definition.vector.service.provider\n if (\n collection\n and collection.definition\n and collection.definition.vector\n and collection.definition.vector.service\n )\n else None\n )\n\n # If there is no provider, use the vector store icon\n if not provider_name or provider_name.lower() == \"bring your own\":\n return \"vectorstores\"\n\n # Map provider casings\n case_map = {\n \"nvidia\": \"NVIDIA\",\n \"openai\": \"OpenAI\",\n \"amazon bedrock\": \"AmazonBedrockEmbeddings\",\n \"azure openai\": \"AzureOpenAiEmbeddings\",\n \"cohere\": \"Cohere\",\n \"jina ai\": \"JinaAI\",\n \"mistral ai\": \"MistralAI\",\n \"upstage\": \"Upstage\",\n \"voyage ai\": \"VoyageAI\",\n }\n\n # 
Adjust the casing on some like nvidia\n return case_map[provider_name.lower()] if provider_name.lower() in case_map else provider_name.title()\n\n def _initialize_collection_options(self, api_endpoint: str | None = None):\n # Nothing to generate if we don't have an API endpoint yet\n api_endpoint = api_endpoint or self.get_api_endpoint()\n if not api_endpoint:\n return []\n\n # Retrieve the database object\n database = self.get_database_object(api_endpoint=api_endpoint)\n\n # Get the list of collections\n collection_list = database.list_collections(keyspace=self.get_keyspace())\n\n # Return the list of collections and metadata associated\n return [\n {\n \"name\": col.name,\n \"records\": self.collection_data(collection_name=col.name, database=database),\n \"provider\": (\n col.definition.vector.service.provider\n if col.definition.vector and col.definition.vector.service\n else None\n ),\n \"icon\": self.get_provider_icon(collection=col),\n \"model\": (\n col.definition.vector.service.model_name\n if col.definition.vector and col.definition.vector.service\n else None\n ),\n }\n for col in collection_list\n ]\n\n def reset_provider_options(self, build_config: dict) -> dict:\n \"\"\"Reset provider options and related configurations in the build_config dictionary.\"\"\"\n # Extract template path for cleaner access\n template = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n\n # Get vectorize providers\n vectorize_providers_api = self.get_vectorize_providers(\n token=self.token,\n environment=self.environment,\n api_endpoint=build_config[\"api_endpoint\"][\"value\"],\n )\n\n # Create a new dictionary with \"Bring your own\" first\n vectorize_providers: dict[str, list[list[str]]] = {\"Bring your own\": [[], []]}\n\n # Add the remaining items (only Nvidia) from the original dictionary\n vectorize_providers.update(\n {\n k: v\n for k, v in vectorize_providers_api.items()\n if k.lower() in [\"nvidia\"] # TODO: Eventually support more\n }\n )\n\n # Set provider options\n provider_field = \"02_embedding_generation_provider\"\n template[provider_field][\"options\"] = list(vectorize_providers.keys())\n\n # Add metadata for each provider option\n template[provider_field][\"options_metadata\"] = [\n {\"icon\": self.get_provider_icon(provider_name=provider)} for provider in template[provider_field][\"options\"]\n ]\n\n # Get selected embedding provider\n embedding_provider = template[provider_field][\"value\"]\n is_bring_your_own = embedding_provider and embedding_provider == \"Bring your own\"\n\n # Configure embedding model field\n model_field = \"03_embedding_generation_model\"\n template[model_field].update(\n {\n \"options\": vectorize_providers.get(embedding_provider, [[], []])[1],\n \"placeholder\": \"Bring your own\" if is_bring_your_own else None,\n \"readonly\": is_bring_your_own,\n \"required\": not is_bring_your_own,\n \"value\": None,\n }\n )\n\n # If this is a bring your own, set dimensions to 0\n return self.reset_dimension_field(build_config)\n\n def reset_dimension_field(self, build_config: dict) -> dict:\n \"\"\"Reset dimension field options based on provided configuration.\"\"\"\n # Extract template path for cleaner access\n template = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n\n # Get selected embedding model\n provider_field = \"02_embedding_generation_provider\"\n embedding_provider = template[provider_field][\"value\"]\n is_bring_your_own = embedding_provider and 
embedding_provider == \"Bring your own\"\n\n # Configure dimension field\n dimension_field = \"04_dimension\"\n dimension_value = 1024 if not is_bring_your_own else None # TODO: Dynamically figure this out\n template[dimension_field].update(\n {\n \"placeholder\": dimension_value,\n \"value\": dimension_value,\n \"readonly\": not is_bring_your_own,\n \"required\": is_bring_your_own,\n }\n )\n\n return build_config\n\n def reset_collection_list(self, build_config: dict) -> dict:\n \"\"\"Reset collection list options based on provided configuration.\"\"\"\n # Get collection options\n collection_options = self._initialize_collection_options(api_endpoint=build_config[\"api_endpoint\"][\"value\"])\n # Update collection configuration\n collection_config = build_config[\"collection_name\"]\n collection_config.update(\n {\n \"options\": [col[\"name\"] for col in collection_options],\n \"options_metadata\": [{k: v for k, v in col.items() if k != \"name\"} for col in collection_options],\n }\n )\n\n # Reset selected collection if not in options\n if collection_config[\"value\"] not in collection_config[\"options\"]:\n collection_config[\"value\"] = \"\"\n\n # Set advanced status based on database selection\n collection_config[\"show\"] = bool(build_config[\"database_name\"][\"value\"])\n\n return build_config\n\n def reset_database_list(self, build_config: dict) -> dict:\n \"\"\"Reset database list options and related configurations.\"\"\"\n # Get database options\n database_options = self._initialize_database_options()\n\n # Update cloud provider options\n env = self.environment\n template = build_config[\"database_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n template[\"02_cloud_provider\"][\"options\"] = list(self.map_cloud_providers()[env].keys())\n\n # Update database configuration\n database_config = build_config[\"database_name\"]\n database_config.update(\n {\n \"options\": [db[\"name\"] for db in database_options],\n \"options_metadata\": [{k: v for k, v in db.items() if k != \"name\"} for db in database_options],\n }\n )\n\n # Reset selections if value not in options\n if database_config[\"value\"] not in database_config[\"options\"]:\n database_config[\"value\"] = \"\"\n build_config[\"api_endpoint\"][\"value\"] = \"\"\n build_config[\"collection_name\"][\"show\"] = False\n\n # Set advanced status based on token presence\n database_config[\"show\"] = bool(build_config[\"token\"][\"value\"])\n\n return build_config\n\n def reset_build_config(self, build_config: dict) -> dict:\n \"\"\"Reset all build configuration options to default empty state.\"\"\"\n # Reset database configuration\n database_config = build_config[\"database_name\"]\n database_config.update({\"options\": [], \"options_metadata\": [], \"value\": \"\", \"show\": False})\n build_config[\"api_endpoint\"][\"value\"] = \"\"\n\n # Reset collection configuration\n collection_config = build_config[\"collection_name\"]\n collection_config.update({\"options\": [], \"options_metadata\": [], \"value\": \"\", \"show\": False})\n\n return build_config\n\n def _handle_hybrid_search_options(self, build_config: dict) -> dict:\n \"\"\"Set hybrid search options in the build configuration.\"\"\"\n # Detect what hybrid options are available\n # Get the admin object\n client = DataAPIClient(environment=self.environment)\n admin_client = client.get_admin()\n db_admin = admin_client.get_database_admin(self.get_api_endpoint(), token=self.token)\n\n # We will try to get the reranking providers to see if its hybrid 
emabled\n try:\n providers = db_admin.find_reranking_providers()\n build_config[\"reranker\"][\"options\"] = [\n model.name for provider_data in providers.reranking_providers.values() for model in provider_data.models\n ]\n build_config[\"reranker\"][\"options_metadata\"] = [\n {\"icon\": self.get_provider_icon(provider_name=model.name.split(\"/\")[0])}\n for provider in providers.reranking_providers.values()\n for model in provider.models\n ]\n build_config[\"reranker\"][\"value\"] = build_config[\"reranker\"][\"options\"][0]\n\n # Set the default search field to hybrid search\n build_config[\"search_method\"][\"show\"] = True\n build_config[\"search_method\"][\"options\"] = [\"Hybrid Search\", \"Vector Search\"]\n build_config[\"search_method\"][\"value\"] = \"Hybrid Search\"\n except Exception as _: # noqa: BLE001\n build_config[\"reranker\"][\"options\"] = []\n build_config[\"reranker\"][\"options_metadata\"] = []\n\n # Set the default search field to vector search\n build_config[\"search_method\"][\"show\"] = False\n build_config[\"search_method\"][\"options\"] = [\"Vector Search\"]\n build_config[\"search_method\"][\"value\"] = \"Vector Search\"\n\n # Set reranker and lexical terms options based on search method\n build_config[\"reranker\"][\"toggle_value\"] = True\n build_config[\"reranker\"][\"show\"] = build_config[\"search_method\"][\"value\"] == \"Hybrid Search\"\n build_config[\"reranker\"][\"toggle_disable\"] = build_config[\"search_method\"][\"value\"] == \"Hybrid Search\"\n if build_config[\"reranker\"][\"show\"]:\n build_config[\"search_type\"][\"value\"] = \"Similarity\"\n\n return build_config\n\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Update build configuration based on field name and value.\"\"\"\n # Early return if no token provided\n if not self.token:\n return self.reset_build_config(build_config)\n\n # Database creation callback\n if field_name == \"database_name\" and isinstance(field_value, dict):\n if \"01_new_database_name\" in field_value:\n await self._create_new_database(build_config, field_value)\n return self.reset_collection_list(build_config)\n return self._update_cloud_regions(build_config, field_value)\n\n # Collection creation callback\n if field_name == \"collection_name\" and isinstance(field_value, dict):\n # Case 1: New collection creation\n if \"01_new_collection_name\" in field_value:\n await self._create_new_collection(build_config, field_value)\n return build_config\n\n # Case 2: Update embedding provider options\n if \"02_embedding_generation_provider\" in field_value:\n return self.reset_provider_options(build_config)\n\n # Case 3: Update dimension field\n if \"03_embedding_generation_model\" in field_value:\n return self.reset_dimension_field(build_config)\n\n # Initial execution or token/environment change\n first_run = field_name == \"collection_name\" and not field_value and not build_config[\"database_name\"][\"options\"]\n if first_run or field_name in {\"token\", \"environment\"}:\n return self.reset_database_list(build_config)\n\n # Database selection change\n if field_name == \"database_name\" and not isinstance(field_value, dict):\n return self._handle_database_selection(build_config, field_value)\n\n # Keyspace selection change\n if field_name == \"keyspace\":\n return self.reset_collection_list(build_config)\n\n # Collection selection change\n if field_name == \"collection_name\" and not isinstance(field_value, dict):\n return 
self._handle_collection_selection(build_config, field_value)\n\n # Search method selection change\n if field_name == \"search_method\":\n is_vector_search = field_value == \"Vector Search\"\n is_autodetect = build_config[\"autodetect_collection\"][\"value\"]\n\n # Configure lexical terms (same for both cases)\n build_config[\"lexical_terms\"][\"show\"] = not is_vector_search\n build_config[\"lexical_terms\"][\"value\"] = \"\" if is_vector_search else build_config[\"lexical_terms\"][\"value\"]\n\n # Disable reranker disabling if hybrid search is selected\n build_config[\"reranker\"][\"toggle_disable\"] = not is_vector_search\n build_config[\"reranker\"][\"toggle_value\"] = True\n build_config[\"reranker\"][\"value\"] = build_config[\"reranker\"][\"options\"][0]\n\n # Toggle search type and score threshold based on search method\n build_config[\"search_type\"][\"show\"] = is_vector_search\n build_config[\"search_score_threshold\"][\"show\"] = is_vector_search\n\n # Make sure the search_type is set to \"Similarity\"\n if not is_vector_search or is_autodetect:\n build_config[\"search_type\"][\"value\"] = \"Similarity\"\n\n return build_config\n\n async def _create_new_database(self, build_config: dict, field_value: dict) -> None:\n \"\"\"Create a new database and update build config options.\"\"\"\n try:\n await self.create_database_api(\n new_database_name=field_value[\"01_new_database_name\"],\n token=self.token,\n keyspace=self.get_keyspace(),\n environment=self.environment,\n cloud_provider=field_value[\"02_cloud_provider\"],\n region=field_value[\"03_region\"],\n )\n except Exception as e:\n msg = f\"Error creating database: {e}\"\n raise ValueError(msg) from e\n\n build_config[\"database_name\"][\"options\"].append(field_value[\"01_new_database_name\"])\n build_config[\"database_name\"][\"options_metadata\"].append(\n {\n \"status\": \"PENDING\",\n \"collections\": 0,\n \"api_endpoint\": None,\n \"keyspaces\": [self.get_keyspace()],\n \"org_id\": None,\n }\n )\n\n def _update_cloud_regions(self, build_config: dict, field_value: dict) -> dict:\n \"\"\"Update cloud provider regions in build config.\"\"\"\n env = self.environment\n cloud_provider = field_value[\"02_cloud_provider\"]\n\n # Update the region options based on the selected cloud provider\n template = build_config[\"database_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n template[\"03_region\"][\"options\"] = self.map_cloud_providers()[env][cloud_provider][\"regions\"]\n\n # Reset the the 03_region value if it's not in the new options\n if template[\"03_region\"][\"value\"] not in template[\"03_region\"][\"options\"]:\n template[\"03_region\"][\"value\"] = None\n\n return build_config\n\n async def _create_new_collection(self, build_config: dict, field_value: dict) -> None:\n \"\"\"Create a new collection and update build config options.\"\"\"\n embedding_provider = field_value.get(\"02_embedding_generation_provider\")\n try:\n await self.create_collection_api(\n new_collection_name=field_value[\"01_new_collection_name\"],\n token=self.token,\n api_endpoint=build_config[\"api_endpoint\"][\"value\"],\n environment=self.environment,\n keyspace=self.get_keyspace(),\n dimension=field_value.get(\"04_dimension\") if embedding_provider == \"Bring your own\" else None,\n embedding_generation_provider=embedding_provider,\n embedding_generation_model=field_value.get(\"03_embedding_generation_model\"),\n reranker=self.reranker,\n )\n except Exception as e:\n msg = f\"Error creating collection: {e}\"\n raise 
ValueError(msg) from e\n\n provider = embedding_provider.lower() if embedding_provider and embedding_provider != \"Bring your own\" else None\n build_config[\"collection_name\"].update(\n {\n \"value\": field_value[\"01_new_collection_name\"],\n \"options\": build_config[\"collection_name\"][\"options\"] + [field_value[\"01_new_collection_name\"]],\n }\n )\n build_config[\"embedding_model\"][\"show\"] = not bool(provider)\n build_config[\"embedding_model\"][\"required\"] = not bool(provider)\n build_config[\"collection_name\"][\"options_metadata\"].append(\n {\n \"records\": 0,\n \"provider\": provider,\n \"icon\": self.get_provider_icon(provider_name=provider),\n \"model\": field_value.get(\"03_embedding_generation_model\"),\n }\n )\n\n # Make sure we always show the reranker options if the collection is hybrid enabled\n # And right now they always are\n build_config[\"lexical_terms\"][\"show\"] = True\n\n def _handle_database_selection(self, build_config: dict, field_value: str) -> dict:\n \"\"\"Handle database selection and update related configurations.\"\"\"\n build_config = self.reset_database_list(build_config)\n\n # Reset collection list if database selection changes\n if field_value not in build_config[\"database_name\"][\"options\"]:\n build_config[\"database_name\"][\"value\"] = \"\"\n return build_config\n\n # Get the api endpoint for the selected database\n index = build_config[\"database_name\"][\"options\"].index(field_value)\n build_config[\"api_endpoint\"][\"value\"] = build_config[\"database_name\"][\"options_metadata\"][index][\"api_endpoint\"]\n\n # Get the org_id for the selected database\n org_id = build_config[\"database_name\"][\"options_metadata\"][index][\"org_id\"]\n if not org_id:\n return build_config\n\n # Update the list of keyspaces based on the db info\n build_config[\"keyspace\"][\"options\"] = build_config[\"database_name\"][\"options_metadata\"][index][\"keyspaces\"]\n build_config[\"keyspace\"][\"value\"] = (\n build_config[\"keyspace\"][\"options\"] and build_config[\"keyspace\"][\"options\"][0]\n if build_config[\"keyspace\"][\"value\"] not in build_config[\"keyspace\"][\"options\"]\n else build_config[\"keyspace\"][\"value\"]\n )\n\n # Get the database id for the selected database\n db_id = self.get_database_id_static(api_endpoint=build_config[\"api_endpoint\"][\"value\"])\n keyspace = self.get_keyspace()\n\n # Update the helper text for the embedding provider field\n template = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n template[\"02_embedding_generation_provider\"][\"helper_text\"] = (\n \"To create collections with more embedding provider options, go to \"\n f''\n \"your database in Astra DB.\"\n )\n\n # Reset provider options\n build_config = self.reset_provider_options(build_config)\n\n # Handle hybrid search options\n build_config = self._handle_hybrid_search_options(build_config)\n\n return self.reset_collection_list(build_config)\n\n def _handle_collection_selection(self, build_config: dict, field_value: str) -> dict:\n \"\"\"Handle collection selection and update embedding options.\"\"\"\n build_config[\"autodetect_collection\"][\"value\"] = True\n build_config = self.reset_collection_list(build_config)\n\n # Reset embedding model if collection selection changes\n if field_value and field_value not in build_config[\"collection_name\"][\"options\"]:\n build_config[\"collection_name\"][\"options\"].append(field_value)\n build_config[\"collection_name\"][\"options_metadata\"].append(\n 
{\n \"records\": 0,\n \"provider\": None,\n \"icon\": \"vectorstores\",\n \"model\": None,\n }\n )\n build_config[\"autodetect_collection\"][\"value\"] = False\n\n if not field_value:\n return build_config\n\n # Get the selected collection index\n index = build_config[\"collection_name\"][\"options\"].index(field_value)\n\n # Set the provider of the selected collection\n provider = build_config[\"collection_name\"][\"options_metadata\"][index][\"provider\"]\n build_config[\"embedding_model\"][\"show\"] = not bool(provider)\n build_config[\"embedding_model\"][\"required\"] = not bool(provider)\n\n # Grab the collection object\n database = self.get_database_object(api_endpoint=build_config[\"api_endpoint\"][\"value\"])\n collection = database.get_collection(\n name=field_value,\n keyspace=build_config[\"keyspace\"][\"value\"],\n )\n\n # Check if hybrid and lexical are enabled\n col_options = collection.options()\n hyb_enabled = col_options.rerank and col_options.rerank.enabled\n lex_enabled = col_options.lexical and col_options.lexical.enabled\n user_hyb_enabled = build_config[\"search_method\"][\"value\"] == \"Hybrid Search\"\n\n # Show lexical terms if the collection is hybrid enabled\n build_config[\"lexical_terms\"][\"show\"] = hyb_enabled and lex_enabled and user_hyb_enabled\n\n return build_config\n\n @check_cached_vector_store\n def build_vector_store(self):\n try:\n from langchain_astradb import AstraDBVectorStore\n except ImportError as e:\n msg = (\n \"Could not import langchain Astra DB integration package. \"\n \"Please install it with `pip install langchain-astradb`.\"\n )\n raise ImportError(msg) from e\n\n # Get the embedding model and additional params\n embedding_params = {\"embedding\": self.embedding_model} if self.embedding_model else {}\n\n # Get the additional parameters\n additional_params = self.astradb_vectorstore_kwargs or {}\n\n # Get Langflow version and platform information\n __version__ = get_version_info()[\"version\"]\n langflow_prefix = \"\"\n # if os.getenv(\"AWS_EXECUTION_ENV\") == \"AWS_ECS_FARGATE\": # TODO: More precise way of detecting\n # langflow_prefix = \"ds-\"\n\n # Get the database object\n database = self.get_database_object()\n autodetect = self.collection_name in database.list_collection_names() and self.autodetect_collection\n\n # Bundle up the auto-detect parameters\n autodetect_params = {\n \"autodetect_collection\": autodetect,\n \"content_field\": (\n self.content_field\n if self.content_field and embedding_params\n else (\n \"page_content\"\n if embedding_params\n and self.collection_data(collection_name=self.collection_name, database=database) == 0\n else None\n )\n ),\n \"ignore_invalid_documents\": self.ignore_invalid_documents,\n }\n\n # Choose HybridSearchMode based on the selected param\n hybrid_search_mode = HybridSearchMode.DEFAULT if self.search_method == \"Hybrid Search\" else HybridSearchMode.OFF\n\n # Attempt to build the Vector Store object\n try:\n vector_store = AstraDBVectorStore(\n # Astra DB Authentication Parameters\n token=self.token,\n api_endpoint=database.api_endpoint,\n namespace=database.keyspace,\n collection_name=self.collection_name,\n environment=self.environment,\n # Hybrid Search Parameters\n hybrid_search=hybrid_search_mode,\n # Astra DB Usage Tracking Parameters\n ext_callers=[(f\"{langflow_prefix}langflow\", __version__)],\n # Astra DB Vector Store Parameters\n **autodetect_params,\n **embedding_params,\n **additional_params,\n )\n except Exception as e:\n msg = f\"Error initializing AstraDBVectorStore: 
{e}\"\n raise ValueError(msg) from e\n\n # Add documents to the vector store\n self._add_documents_to_vector_store(vector_store)\n\n return vector_store\n\n def _add_documents_to_vector_store(self, vector_store) -> None:\n self.ingest_data = self._prepare_ingest_data()\n\n documents = []\n for _input in self.ingest_data or []:\n if isinstance(_input, Data):\n documents.append(_input.to_lc_document())\n else:\n msg = \"Vector Store Inputs must be Data objects.\"\n raise TypeError(msg)\n\n if documents and self.deletion_field:\n self.log(f\"Deleting documents where {self.deletion_field}\")\n try:\n database = self.get_database_object()\n collection = database.get_collection(self.collection_name, keyspace=database.keyspace)\n delete_values = list({doc.metadata[self.deletion_field] for doc in documents})\n self.log(f\"Deleting documents where {self.deletion_field} matches {delete_values}.\")\n collection.delete_many({f\"metadata.{self.deletion_field}\": {\"$in\": delete_values}})\n except Exception as e:\n msg = f\"Error deleting documents from AstraDBVectorStore based on '{self.deletion_field}': {e}\"\n raise ValueError(msg) from e\n\n if documents:\n self.log(f\"Adding {len(documents)} documents to the Vector Store.\")\n try:\n vector_store.add_documents(documents)\n except Exception as e:\n msg = f\"Error adding documents to AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n else:\n self.log(\"No documents to add to the Vector Store.\")\n\n def _map_search_type(self) -> str:\n search_type_mapping = {\n \"Similarity with score threshold\": \"similarity_score_threshold\",\n \"MMR (Max Marginal Relevance)\": \"mmr\",\n }\n\n return search_type_mapping.get(self.search_type, \"similarity\")\n\n def _build_search_args(self):\n # Clean up the search query\n query = self.search_query if isinstance(self.search_query, str) and self.search_query.strip() else None\n lexical_terms = self.lexical_terms or None\n\n # Check if we have a search query, and if so set the args\n if query:\n args = {\n \"query\": query,\n \"search_type\": self._map_search_type(),\n \"k\": self.number_of_results,\n \"score_threshold\": self.search_score_threshold,\n \"lexical_query\": lexical_terms,\n }\n elif self.advanced_search_filter:\n args = {\n \"n\": self.number_of_results,\n }\n else:\n return {}\n\n filter_arg = self.advanced_search_filter or {}\n if filter_arg:\n args[\"filter\"] = filter_arg\n\n return args\n\n def search_documents(self, vector_store=None) -> list[Data]:\n vector_store = vector_store or self.build_vector_store()\n\n self.log(f\"Search input: {self.search_query}\")\n self.log(f\"Search type: {self.search_type}\")\n self.log(f\"Number of results: {self.number_of_results}\")\n self.log(f\"store.hybrid_search: {vector_store.hybrid_search}\")\n self.log(f\"Lexical terms: {self.lexical_terms}\")\n self.log(f\"Reranker: {self.reranker}\")\n\n try:\n search_args = self._build_search_args()\n except Exception as e:\n msg = f\"Error in AstraDBVectorStore._build_search_args: {e}\"\n raise ValueError(msg) from e\n\n if not search_args:\n self.log(\"No search input or filters provided. 
Skipping search.\")\n return []\n\n docs = []\n search_method = \"search\" if \"query\" in search_args else \"metadata_search\"\n\n try:\n self.log(f\"Calling vector_store.{search_method} with args: {search_args}\")\n docs = getattr(vector_store, search_method)(**search_args)\n except Exception as e:\n msg = f\"Error performing {search_method} in AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n\n self.log(f\"Retrieved documents: {len(docs)}\")\n\n data = docs_to_data(docs)\n self.log(f\"Converted documents to data: {len(data)}\")\n self.status = data\n\n return data\n\n def get_retriever_kwargs(self):\n search_args = self._build_search_args()\n\n return {\n \"search_type\": self._map_search_type(),\n \"search_kwargs\": search_args,\n }\n"
+ "value": "import re\nfrom collections import defaultdict\nfrom dataclasses import asdict, dataclass, field\n\nfrom astrapy import DataAPIClient, Database\nfrom astrapy.data.info.reranking import RerankServiceOptions\nfrom astrapy.info import CollectionDescriptor, CollectionLexicalOptions, CollectionRerankOptions\nfrom langchain_astradb import AstraDBVectorStore, VectorServiceOptions\nfrom langchain_astradb.utils.astradb import HybridSearchMode, _AstraDBCollectionEnvironment\n\nfrom langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom langflow.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom langflow.helpers.data import docs_to_data\nfrom langflow.inputs.inputs import FloatInput, NestedDictInput\nfrom langflow.io import (\n BoolInput,\n DropdownInput,\n HandleInput,\n IntInput,\n QueryInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema.data import Data\nfrom langflow.utils.version import get_version_info\n\n\n@vector_store_connection\nclass AstraDBVectorStoreComponent(LCVectorStoreComponent):\n display_name: str = \"Astra DB\"\n description: str = \"Ingest and search documents in Astra DB\"\n documentation: str = \"https://docs.datastax.com/en/langflow/astra-components.html\"\n name = \"AstraDB\"\n icon: str = \"AstraDB\"\n\n _cached_vector_store: AstraDBVectorStore | None = None\n\n @dataclass\n class NewDatabaseInput:\n functionality: str = \"create\"\n fields: dict[str, dict] = field(\n default_factory=lambda: {\n \"data\": {\n \"node\": {\n \"name\": \"create_database\",\n \"description\": \"Please allow several minutes for creation to complete.\",\n \"display_name\": \"Create new database\",\n \"field_order\": [\"01_new_database_name\", \"02_cloud_provider\", \"03_region\"],\n \"template\": {\n \"01_new_database_name\": StrInput(\n name=\"new_database_name\",\n display_name=\"Name\",\n info=\"Name of the new database to create in Astra DB.\",\n required=True,\n ),\n \"02_cloud_provider\": DropdownInput(\n name=\"cloud_provider\",\n display_name=\"Cloud provider\",\n info=\"Cloud provider for the new database.\",\n options=[],\n required=True,\n real_time_refresh=True,\n ),\n \"03_region\": DropdownInput(\n name=\"region\",\n display_name=\"Region\",\n info=\"Region for the new database.\",\n options=[],\n required=True,\n ),\n },\n },\n }\n }\n )\n\n @dataclass\n class NewCollectionInput:\n functionality: str = \"create\"\n fields: dict[str, dict] = field(\n default_factory=lambda: {\n \"data\": {\n \"node\": {\n \"name\": \"create_collection\",\n \"description\": \"Please allow several seconds for creation to complete.\",\n \"display_name\": \"Create new collection\",\n \"field_order\": [\n \"01_new_collection_name\",\n \"02_embedding_generation_provider\",\n \"03_embedding_generation_model\",\n \"04_dimension\",\n ],\n \"template\": {\n \"01_new_collection_name\": StrInput(\n name=\"new_collection_name\",\n display_name=\"Name\",\n info=\"Name of the new collection to create in Astra DB.\",\n required=True,\n ),\n \"02_embedding_generation_provider\": DropdownInput(\n name=\"embedding_generation_provider\",\n display_name=\"Embedding generation method\",\n info=\"Provider to use for generating embeddings.\",\n helper_text=(\n \"To create collections with more embedding provider options, go to \"\n 'your database in Astra DB'\n ),\n real_time_refresh=True,\n required=True,\n options=[],\n ),\n \"03_embedding_generation_model\": DropdownInput(\n name=\"embedding_generation_model\",\n 
display_name=\"Embedding model\",\n info=\"Model to use for generating embeddings.\",\n real_time_refresh=True,\n options=[],\n ),\n \"04_dimension\": IntInput(\n name=\"dimension\",\n display_name=\"Dimensions\",\n info=\"Dimensions of the embeddings to generate.\",\n value=None,\n ),\n },\n },\n }\n }\n )\n\n inputs = [\n SecretStrInput(\n name=\"token\",\n display_name=\"Astra DB Application Token\",\n info=\"Authentication token for accessing Astra DB.\",\n value=\"ASTRA_DB_APPLICATION_TOKEN\",\n required=True,\n real_time_refresh=True,\n input_types=[],\n ),\n DropdownInput(\n name=\"environment\",\n display_name=\"Environment\",\n info=\"The environment for the Astra DB API Endpoint.\",\n options=[\"prod\", \"test\", \"dev\"],\n value=\"prod\",\n advanced=True,\n real_time_refresh=True,\n combobox=True,\n ),\n DropdownInput(\n name=\"database_name\",\n display_name=\"Database\",\n info=\"The Database name for the Astra DB instance.\",\n required=True,\n refresh_button=True,\n real_time_refresh=True,\n dialog_inputs=asdict(NewDatabaseInput()),\n combobox=True,\n ),\n StrInput(\n name=\"api_endpoint\",\n display_name=\"Astra DB API Endpoint\",\n info=\"The API Endpoint for the Astra DB instance. Supercedes database selection.\",\n show=False,\n ),\n DropdownInput(\n name=\"keyspace\",\n display_name=\"Keyspace\",\n info=\"Optional keyspace within Astra DB to use for the collection.\",\n advanced=True,\n options=[],\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"collection_name\",\n display_name=\"Collection\",\n info=\"The name of the collection within Astra DB where the vectors will be stored.\",\n required=True,\n refresh_button=True,\n real_time_refresh=True,\n dialog_inputs=asdict(NewCollectionInput()),\n combobox=True,\n show=False,\n ),\n HandleInput(\n name=\"embedding_model\",\n display_name=\"Embedding Model\",\n input_types=[\"Embeddings\"],\n info=\"Specify the Embedding Model. 
Not required for Astra Vectorize collections.\",\n required=False,\n show=False,\n ),\n *LCVectorStoreComponent.inputs,\n DropdownInput(\n name=\"search_method\",\n display_name=\"Search Method\",\n info=(\n \"Determine how your content is matched: Vector finds semantic similarity, \"\n \"and Hybrid Search (suggested) combines both approaches \"\n \"with a reranker.\"\n ),\n options=[\"Hybrid Search\", \"Vector Search\"], # TODO: Restore Lexical Search?\n options_metadata=[{\"icon\": \"SearchHybrid\"}, {\"icon\": \"SearchVector\"}],\n value=\"Vector Search\",\n advanced=True,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"reranker\",\n display_name=\"Reranker\",\n info=\"Post-retrieval model that re-scores results for optimal relevance ranking.\",\n show=False,\n toggle=True,\n ),\n QueryInput(\n name=\"lexical_terms\",\n display_name=\"Lexical Terms\",\n info=\"Add additional terms/keywords to augment search precision.\",\n placeholder=\"Enter terms to search...\",\n separator=\" \",\n show=False,\n value=\"\",\n advanced=True,\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Number of Search Results\",\n info=\"Number of search results to return.\",\n advanced=True,\n value=4,\n ),\n DropdownInput(\n name=\"search_type\",\n display_name=\"Search Type\",\n info=\"Search type to use\",\n options=[\"Similarity\", \"Similarity with score threshold\", \"MMR (Max Marginal Relevance)\"],\n value=\"Similarity\",\n advanced=True,\n ),\n FloatInput(\n name=\"search_score_threshold\",\n display_name=\"Search Score Threshold\",\n info=\"Minimum similarity score threshold for search results. \"\n \"(when using 'Similarity with score threshold')\",\n value=0,\n advanced=True,\n ),\n NestedDictInput(\n name=\"advanced_search_filter\",\n display_name=\"Search Metadata Filter\",\n info=\"Optional dictionary of filters to apply to the search query.\",\n advanced=True,\n ),\n BoolInput(\n name=\"autodetect_collection\",\n display_name=\"Autodetect Collection\",\n info=\"Boolean flag to determine whether to autodetect the collection.\",\n advanced=True,\n value=True,\n ),\n StrInput(\n name=\"content_field\",\n display_name=\"Content Field\",\n info=\"Field to use as the text content field for the vector store.\",\n advanced=True,\n ),\n StrInput(\n name=\"deletion_field\",\n display_name=\"Deletion Based On Field\",\n info=\"When this parameter is provided, documents in the target collection with \"\n \"metadata field values matching the input metadata field value will be deleted \"\n \"before new data is loaded.\",\n advanced=True,\n ),\n BoolInput(\n name=\"ignore_invalid_documents\",\n display_name=\"Ignore Invalid Documents\",\n info=\"Boolean flag to determine whether to ignore invalid documents at runtime.\",\n advanced=True,\n ),\n NestedDictInput(\n name=\"astradb_vectorstore_kwargs\",\n display_name=\"AstraDBVectorStore Parameters\",\n info=\"Optional dictionary of additional parameters for the AstraDBVectorStore.\",\n advanced=True,\n ),\n ]\n\n @classmethod\n def map_cloud_providers(cls):\n # TODO: Programmatically fetch the regions for each cloud provider\n return {\n \"dev\": {\n \"Amazon Web Services\": {\n \"id\": \"aws\",\n \"regions\": [\"us-west-2\"],\n },\n \"Google Cloud Platform\": {\n \"id\": \"gcp\",\n \"regions\": [\"us-central1\", \"europe-west4\"],\n },\n },\n \"test\": {\n \"Google Cloud Platform\": {\n \"id\": \"gcp\",\n \"regions\": [\"us-central1\"],\n },\n },\n \"prod\": {\n \"Amazon Web Services\": {\n \"id\": \"aws\",\n \"regions\": [\"us-east-2\", 
\"ap-south-1\", \"eu-west-1\"],\n },\n \"Google Cloud Platform\": {\n \"id\": \"gcp\",\n \"regions\": [\"us-east1\"],\n },\n \"Microsoft Azure\": {\n \"id\": \"azure\",\n \"regions\": [\"westus3\"],\n },\n },\n }\n\n @classmethod\n def get_vectorize_providers(cls, token: str, environment: str | None = None, api_endpoint: str | None = None):\n try:\n # Get the admin object\n client = DataAPIClient(environment=environment)\n admin_client = client.get_admin()\n db_admin = admin_client.get_database_admin(api_endpoint, token=token)\n\n # Get the list of embedding providers\n embedding_providers = db_admin.find_embedding_providers()\n\n vectorize_providers_mapping = {}\n # Map the provider display name to the provider key and models\n for provider_key, provider_data in embedding_providers.embedding_providers.items():\n # Get the provider display name and models\n display_name = provider_data.display_name\n models = [model.name for model in provider_data.models]\n\n # Build our mapping\n vectorize_providers_mapping[display_name] = [provider_key, models]\n\n # Sort the resulting dictionary\n return defaultdict(list, dict(sorted(vectorize_providers_mapping.items())))\n except Exception as _: # noqa: BLE001\n return {}\n\n @classmethod\n async def create_database_api(\n cls,\n new_database_name: str,\n cloud_provider: str,\n region: str,\n token: str,\n environment: str | None = None,\n keyspace: str | None = None,\n ):\n client = DataAPIClient(environment=environment)\n\n # Get the admin object\n admin_client = client.get_admin(token=token)\n\n # Get the environment, set to prod if null like\n my_env = environment or \"prod\"\n\n # Raise a value error if name isn't provided\n if not new_database_name:\n msg = \"Database name is required to create a new database.\"\n raise ValueError(msg)\n\n # Call the create database function\n return await admin_client.async_create_database(\n name=new_database_name,\n cloud_provider=cls.map_cloud_providers()[my_env][cloud_provider][\"id\"],\n region=region,\n keyspace=keyspace,\n wait_until_active=False,\n )\n\n @classmethod\n async def create_collection_api(\n cls,\n new_collection_name: str,\n token: str,\n api_endpoint: str,\n environment: str | None = None,\n keyspace: str | None = None,\n dimension: int | None = None,\n embedding_generation_provider: str | None = None,\n embedding_generation_model: str | None = None,\n reranker: str | None = None,\n ):\n # Build vectorize options, if needed\n vectorize_options = None\n if not dimension:\n providers = cls.get_vectorize_providers(token=token, environment=environment, api_endpoint=api_endpoint)\n vectorize_options = VectorServiceOptions(\n provider=providers.get(embedding_generation_provider, [None, []])[0],\n model_name=embedding_generation_model,\n )\n\n # Raise a value error if name isn't provided\n if not new_collection_name:\n msg = \"Collection name is required to create a new collection.\"\n raise ValueError(msg)\n\n # Define the base arguments being passed to the create collection function\n base_args = {\n \"collection_name\": new_collection_name,\n \"token\": token,\n \"api_endpoint\": api_endpoint,\n \"keyspace\": keyspace,\n \"environment\": environment,\n \"embedding_dimension\": dimension,\n \"collection_vector_service_options\": vectorize_options,\n }\n\n # Add optional arguments if the reranker is set\n if reranker:\n # Split the reranker field into a provider a model name\n provider, _ = reranker.split(\"/\")\n base_args[\"collection_rerank\"] = CollectionRerankOptions(\n 
service=RerankServiceOptions(provider=provider, model_name=reranker),\n )\n base_args[\"collection_lexical\"] = CollectionLexicalOptions(analyzer=\"STANDARD\")\n\n _AstraDBCollectionEnvironment(**base_args)\n\n @classmethod\n def get_database_list_static(cls, token: str, environment: str | None = None):\n client = DataAPIClient(environment=environment)\n\n # Get the admin object\n admin_client = client.get_admin(token=token)\n\n # Get the list of databases\n db_list = admin_client.list_databases()\n\n # Generate the api endpoint for each database\n db_info_dict = {}\n for db in db_list:\n try:\n # Get the API endpoint for the database\n api_endpoint = db.regions[0].api_endpoint\n\n # Get the number of collections\n try:\n # Get the number of collections in the database\n num_collections = len(\n client.get_database(\n api_endpoint,\n token=token,\n ).list_collection_names()\n )\n except Exception: # noqa: BLE001\n if db.status != \"PENDING\":\n continue\n num_collections = 0\n\n # Add the database to the dictionary\n db_info_dict[db.name] = {\n \"api_endpoint\": api_endpoint,\n \"keyspaces\": db.keyspaces,\n \"collections\": num_collections,\n \"status\": db.status if db.status != \"ACTIVE\" else None,\n \"org_id\": db.org_id if db.org_id else None,\n }\n except Exception: # noqa: BLE001, S110\n pass\n\n return db_info_dict\n\n def get_database_list(self):\n return self.get_database_list_static(\n token=self.token,\n environment=self.environment,\n )\n\n @classmethod\n def get_api_endpoint_static(\n cls,\n token: str,\n environment: str | None = None,\n api_endpoint: str | None = None,\n database_name: str | None = None,\n ):\n # If the api_endpoint is set, return it\n if api_endpoint:\n return api_endpoint\n\n # Check if the database_name is like a url\n if database_name and database_name.startswith(\"https://\"):\n return database_name\n\n # If the database is not set, nothing we can do.\n if not database_name:\n return None\n\n # Grab the database object\n db = cls.get_database_list_static(token=token, environment=environment).get(database_name)\n if not db:\n return None\n\n # Otherwise, get the URL from the database list\n return db.get(\"api_endpoint\")\n\n def get_api_endpoint(self):\n return self.get_api_endpoint_static(\n token=self.token,\n environment=self.environment,\n api_endpoint=self.api_endpoint,\n database_name=self.database_name,\n )\n\n @classmethod\n def get_database_id_static(cls, api_endpoint: str) -> str | None:\n # Pattern matches standard UUID format: 8-4-4-4-12 hexadecimal characters\n uuid_pattern = r\"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}\"\n match = re.search(uuid_pattern, api_endpoint)\n\n return match.group(0) if match else None\n\n def get_database_id(self):\n return self.get_database_id_static(api_endpoint=self.get_api_endpoint())\n\n def get_keyspace(self):\n keyspace = self.keyspace\n\n if keyspace:\n return keyspace.strip()\n\n return \"default_keyspace\"\n\n def get_database_object(self, api_endpoint: str | None = None):\n try:\n client = DataAPIClient(environment=self.environment)\n\n return client.get_database(\n api_endpoint or self.get_api_endpoint(),\n token=self.token,\n keyspace=self.get_keyspace(),\n )\n except Exception as e:\n msg = f\"Error fetching database object: {e}\"\n raise ValueError(msg) from e\n\n def collection_data(self, collection_name: str, database: Database | None = None):\n try:\n if not database:\n client = DataAPIClient(environment=self.environment)\n\n database = client.get_database(\n 
self.get_api_endpoint(),\n token=self.token,\n keyspace=self.get_keyspace(),\n )\n\n collection = database.get_collection(collection_name)\n\n return collection.estimated_document_count()\n except Exception as e: # noqa: BLE001\n self.log(f\"Error checking collection data: {e}\")\n\n return None\n\n def _initialize_database_options(self):\n try:\n return [\n {\n \"name\": name,\n \"status\": info[\"status\"],\n \"collections\": info[\"collections\"],\n \"api_endpoint\": info[\"api_endpoint\"],\n \"keyspaces\": info[\"keyspaces\"],\n \"org_id\": info[\"org_id\"],\n }\n for name, info in self.get_database_list().items()\n ]\n except Exception as e:\n msg = f\"Error fetching database options: {e}\"\n raise ValueError(msg) from e\n\n @classmethod\n def get_provider_icon(cls, collection: CollectionDescriptor | None = None, provider_name: str | None = None) -> str:\n # Get the provider name from the collection\n provider_name = provider_name or (\n collection.definition.vector.service.provider\n if (\n collection\n and collection.definition\n and collection.definition.vector\n and collection.definition.vector.service\n )\n else None\n )\n\n # If there is no provider, use the vector store icon\n if not provider_name or provider_name.lower() == \"bring your own\":\n return \"vectorstores\"\n\n # Map provider casings\n case_map = {\n \"nvidia\": \"NVIDIA\",\n \"openai\": \"OpenAI\",\n \"amazon bedrock\": \"AmazonBedrockEmbeddings\",\n \"azure openai\": \"AzureOpenAiEmbeddings\",\n \"cohere\": \"Cohere\",\n \"jina ai\": \"JinaAI\",\n \"mistral ai\": \"MistralAI\",\n \"upstage\": \"Upstage\",\n \"voyage ai\": \"VoyageAI\",\n }\n\n # Adjust the casing on some like nvidia\n return case_map[provider_name.lower()] if provider_name.lower() in case_map else provider_name.title()\n\n def _initialize_collection_options(self, api_endpoint: str | None = None):\n # Nothing to generate if we don't have an API endpoint yet\n api_endpoint = api_endpoint or self.get_api_endpoint()\n if not api_endpoint:\n return []\n\n # Retrieve the database object\n database = self.get_database_object(api_endpoint=api_endpoint)\n\n # Get the list of collections\n collection_list = database.list_collections(keyspace=self.get_keyspace())\n\n # Return the list of collections and metadata associated\n return [\n {\n \"name\": col.name,\n \"records\": self.collection_data(collection_name=col.name, database=database),\n \"provider\": (\n col.definition.vector.service.provider\n if col.definition.vector and col.definition.vector.service\n else None\n ),\n \"icon\": self.get_provider_icon(collection=col),\n \"model\": (\n col.definition.vector.service.model_name\n if col.definition.vector and col.definition.vector.service\n else None\n ),\n }\n for col in collection_list\n ]\n\n def reset_provider_options(self, build_config: dict) -> dict:\n \"\"\"Reset provider options and related configurations in the build_config dictionary.\"\"\"\n # Extract template path for cleaner access\n template = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n\n # Get vectorize providers\n vectorize_providers_api = self.get_vectorize_providers(\n token=self.token,\n environment=self.environment,\n api_endpoint=build_config[\"api_endpoint\"][\"value\"],\n )\n\n # Create a new dictionary with \"Bring your own\" first\n vectorize_providers: dict[str, list[list[str]]] = {\"Bring your own\": [[], []]}\n\n # Add the remaining items (only Nvidia) from the original dictionary\n vectorize_providers.update(\n {\n k: 
v\n for k, v in vectorize_providers_api.items()\n if k.lower() in [\"nvidia\"] # TODO: Eventually support more\n }\n )\n\n # Set provider options\n provider_field = \"02_embedding_generation_provider\"\n template[provider_field][\"options\"] = list(vectorize_providers.keys())\n\n # Add metadata for each provider option\n template[provider_field][\"options_metadata\"] = [\n {\"icon\": self.get_provider_icon(provider_name=provider)} for provider in template[provider_field][\"options\"]\n ]\n\n # Get selected embedding provider\n embedding_provider = template[provider_field][\"value\"]\n is_bring_your_own = embedding_provider and embedding_provider == \"Bring your own\"\n\n # Configure embedding model field\n model_field = \"03_embedding_generation_model\"\n template[model_field].update(\n {\n \"options\": vectorize_providers.get(embedding_provider, [[], []])[1],\n \"placeholder\": \"Bring your own\" if is_bring_your_own else None,\n \"readonly\": is_bring_your_own,\n \"required\": not is_bring_your_own,\n \"value\": None,\n }\n )\n\n # If this is a bring your own, set dimensions to 0\n return self.reset_dimension_field(build_config)\n\n def reset_dimension_field(self, build_config: dict) -> dict:\n \"\"\"Reset dimension field options based on provided configuration.\"\"\"\n # Extract template path for cleaner access\n template = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n\n # Get selected embedding model\n provider_field = \"02_embedding_generation_provider\"\n embedding_provider = template[provider_field][\"value\"]\n is_bring_your_own = embedding_provider and embedding_provider == \"Bring your own\"\n\n # Configure dimension field\n dimension_field = \"04_dimension\"\n dimension_value = 1024 if not is_bring_your_own else None # TODO: Dynamically figure this out\n template[dimension_field].update(\n {\n \"placeholder\": dimension_value,\n \"value\": dimension_value,\n \"readonly\": not is_bring_your_own,\n \"required\": is_bring_your_own,\n }\n )\n\n return build_config\n\n def reset_collection_list(self, build_config: dict) -> dict:\n \"\"\"Reset collection list options based on provided configuration.\"\"\"\n # Get collection options\n collection_options = self._initialize_collection_options(api_endpoint=build_config[\"api_endpoint\"][\"value\"])\n # Update collection configuration\n collection_config = build_config[\"collection_name\"]\n collection_config.update(\n {\n \"options\": [col[\"name\"] for col in collection_options],\n \"options_metadata\": [{k: v for k, v in col.items() if k != \"name\"} for col in collection_options],\n }\n )\n\n # Reset selected collection if not in options\n if collection_config[\"value\"] not in collection_config[\"options\"]:\n collection_config[\"value\"] = \"\"\n\n # Set advanced status based on database selection\n collection_config[\"show\"] = bool(build_config[\"database_name\"][\"value\"])\n\n return build_config\n\n def reset_database_list(self, build_config: dict) -> dict:\n \"\"\"Reset database list options and related configurations.\"\"\"\n # Get database options\n database_options = self._initialize_database_options()\n\n # Update cloud provider options\n env = self.environment\n template = build_config[\"database_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n template[\"02_cloud_provider\"][\"options\"] = list(self.map_cloud_providers()[env].keys())\n\n # Update database configuration\n database_config = build_config[\"database_name\"]\n 
database_config.update(\n {\n \"options\": [db[\"name\"] for db in database_options],\n \"options_metadata\": [{k: v for k, v in db.items() if k != \"name\"} for db in database_options],\n }\n )\n\n # Reset selections if value not in options\n if database_config[\"value\"] not in database_config[\"options\"]:\n database_config[\"value\"] = \"\"\n build_config[\"api_endpoint\"][\"value\"] = \"\"\n build_config[\"collection_name\"][\"show\"] = False\n\n # Set advanced status based on token presence\n database_config[\"show\"] = bool(build_config[\"token\"][\"value\"])\n\n return build_config\n\n def reset_build_config(self, build_config: dict) -> dict:\n \"\"\"Reset all build configuration options to default empty state.\"\"\"\n # Reset database configuration\n database_config = build_config[\"database_name\"]\n database_config.update({\"options\": [], \"options_metadata\": [], \"value\": \"\", \"show\": False})\n build_config[\"api_endpoint\"][\"value\"] = \"\"\n\n # Reset collection configuration\n collection_config = build_config[\"collection_name\"]\n collection_config.update({\"options\": [], \"options_metadata\": [], \"value\": \"\", \"show\": False})\n\n return build_config\n\n def _handle_hybrid_search_options(self, build_config: dict) -> dict:\n \"\"\"Set hybrid search options in the build configuration.\"\"\"\n # Detect what hybrid options are available\n # Get the admin object\n client = DataAPIClient(environment=self.environment)\n admin_client = client.get_admin()\n db_admin = admin_client.get_database_admin(self.get_api_endpoint(), token=self.token)\n\n # We will try to get the reranking providers to see if it's hybrid enabled\n try:\n providers = db_admin.find_reranking_providers()\n build_config[\"reranker\"][\"options\"] = [\n model.name for provider_data in providers.reranking_providers.values() for model in provider_data.models\n ]\n build_config[\"reranker\"][\"options_metadata\"] = [\n {\"icon\": self.get_provider_icon(provider_name=model.name.split(\"/\")[0])}\n for provider in providers.reranking_providers.values()\n for model in provider.models\n ]\n build_config[\"reranker\"][\"value\"] = build_config[\"reranker\"][\"options\"][0]\n\n # Set the default search field to hybrid search\n build_config[\"search_method\"][\"show\"] = True\n build_config[\"search_method\"][\"options\"] = [\"Hybrid Search\", \"Vector Search\"]\n build_config[\"search_method\"][\"value\"] = \"Hybrid Search\"\n except Exception as _: # noqa: BLE001\n build_config[\"reranker\"][\"options\"] = []\n build_config[\"reranker\"][\"options_metadata\"] = []\n\n # Set the default search field to vector search\n build_config[\"search_method\"][\"show\"] = False\n build_config[\"search_method\"][\"options\"] = [\"Vector Search\"]\n build_config[\"search_method\"][\"value\"] = \"Vector Search\"\n\n # Set reranker and lexical terms options based on search method\n build_config[\"reranker\"][\"toggle_value\"] = True\n build_config[\"reranker\"][\"show\"] = build_config[\"search_method\"][\"value\"] == \"Hybrid Search\"\n build_config[\"reranker\"][\"toggle_disable\"] = build_config[\"search_method\"][\"value\"] == \"Hybrid Search\"\n if build_config[\"reranker\"][\"show\"]:\n build_config[\"search_type\"][\"value\"] = \"Similarity\"\n\n return build_config\n\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Update build configuration based on field name and value.\"\"\"\n # Early return if no token provided\n if not self.token:\n return 
self.reset_build_config(build_config)\n\n # Database creation callback\n if field_name == \"database_name\" and isinstance(field_value, dict):\n if \"01_new_database_name\" in field_value:\n await self._create_new_database(build_config, field_value)\n return self.reset_collection_list(build_config)\n return self._update_cloud_regions(build_config, field_value)\n\n # Collection creation callback\n if field_name == \"collection_name\" and isinstance(field_value, dict):\n # Case 1: New collection creation\n if \"01_new_collection_name\" in field_value:\n await self._create_new_collection(build_config, field_value)\n return build_config\n\n # Case 2: Update embedding provider options\n if \"02_embedding_generation_provider\" in field_value:\n return self.reset_provider_options(build_config)\n\n # Case 3: Update dimension field\n if \"03_embedding_generation_model\" in field_value:\n return self.reset_dimension_field(build_config)\n\n # Initial execution or token/environment change\n first_run = field_name == \"collection_name\" and not field_value and not build_config[\"database_name\"][\"options\"]\n if first_run or field_name in {\"token\", \"environment\"}:\n return self.reset_database_list(build_config)\n\n # Database selection change\n if field_name == \"database_name\" and not isinstance(field_value, dict):\n return self._handle_database_selection(build_config, field_value)\n\n # Keyspace selection change\n if field_name == \"keyspace\":\n return self.reset_collection_list(build_config)\n\n # Collection selection change\n if field_name == \"collection_name\" and not isinstance(field_value, dict):\n return self._handle_collection_selection(build_config, field_value)\n\n # Search method selection change\n if field_name == \"search_method\":\n is_vector_search = field_value == \"Vector Search\"\n is_autodetect = build_config[\"autodetect_collection\"][\"value\"]\n\n # Configure lexical terms (same for both cases)\n build_config[\"lexical_terms\"][\"show\"] = not is_vector_search\n build_config[\"lexical_terms\"][\"value\"] = \"\" if is_vector_search else build_config[\"lexical_terms\"][\"value\"]\n\n # Disable reranker disabling if hybrid search is selected\n build_config[\"reranker\"][\"toggle_disable\"] = not is_vector_search\n build_config[\"reranker\"][\"toggle_value\"] = True\n build_config[\"reranker\"][\"value\"] = build_config[\"reranker\"][\"options\"][0]\n\n # Toggle search type and score threshold based on search method\n build_config[\"search_type\"][\"show\"] = is_vector_search\n build_config[\"search_score_threshold\"][\"show\"] = is_vector_search\n\n # Make sure the search_type is set to \"Similarity\"\n if not is_vector_search or is_autodetect:\n build_config[\"search_type\"][\"value\"] = \"Similarity\"\n\n return build_config\n\n async def _create_new_database(self, build_config: dict, field_value: dict) -> None:\n \"\"\"Create a new database and update build config options.\"\"\"\n try:\n await self.create_database_api(\n new_database_name=field_value[\"01_new_database_name\"],\n token=self.token,\n keyspace=self.get_keyspace(),\n environment=self.environment,\n cloud_provider=field_value[\"02_cloud_provider\"],\n region=field_value[\"03_region\"],\n )\n except Exception as e:\n msg = f\"Error creating database: {e}\"\n raise ValueError(msg) from e\n\n build_config[\"database_name\"][\"options\"].append(field_value[\"01_new_database_name\"])\n build_config[\"database_name\"][\"options_metadata\"].append(\n {\n \"status\": \"PENDING\",\n \"collections\": 0,\n 
\"api_endpoint\": None,\n \"keyspaces\": [self.get_keyspace()],\n \"org_id\": None,\n }\n )\n\n def _update_cloud_regions(self, build_config: dict, field_value: dict) -> dict:\n \"\"\"Update cloud provider regions in build config.\"\"\"\n env = self.environment\n cloud_provider = field_value[\"02_cloud_provider\"]\n\n # Update the region options based on the selected cloud provider\n template = build_config[\"database_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n template[\"03_region\"][\"options\"] = self.map_cloud_providers()[env][cloud_provider][\"regions\"]\n\n # Reset the the 03_region value if it's not in the new options\n if template[\"03_region\"][\"value\"] not in template[\"03_region\"][\"options\"]:\n template[\"03_region\"][\"value\"] = None\n\n return build_config\n\n async def _create_new_collection(self, build_config: dict, field_value: dict) -> None:\n \"\"\"Create a new collection and update build config options.\"\"\"\n embedding_provider = field_value.get(\"02_embedding_generation_provider\")\n try:\n await self.create_collection_api(\n new_collection_name=field_value[\"01_new_collection_name\"],\n token=self.token,\n api_endpoint=build_config[\"api_endpoint\"][\"value\"],\n environment=self.environment,\n keyspace=self.get_keyspace(),\n dimension=field_value.get(\"04_dimension\") if embedding_provider == \"Bring your own\" else None,\n embedding_generation_provider=embedding_provider,\n embedding_generation_model=field_value.get(\"03_embedding_generation_model\"),\n reranker=self.reranker,\n )\n except Exception as e:\n msg = f\"Error creating collection: {e}\"\n raise ValueError(msg) from e\n\n provider = embedding_provider.lower() if embedding_provider and embedding_provider != \"Bring your own\" else None\n build_config[\"collection_name\"].update(\n {\n \"value\": field_value[\"01_new_collection_name\"],\n \"options\": build_config[\"collection_name\"][\"options\"] + [field_value[\"01_new_collection_name\"]],\n }\n )\n build_config[\"embedding_model\"][\"show\"] = not bool(provider)\n build_config[\"embedding_model\"][\"required\"] = not bool(provider)\n build_config[\"collection_name\"][\"options_metadata\"].append(\n {\n \"records\": 0,\n \"provider\": provider,\n \"icon\": self.get_provider_icon(provider_name=provider),\n \"model\": field_value.get(\"03_embedding_generation_model\"),\n }\n )\n\n # Make sure we always show the reranker options if the collection is hybrid enabled\n # And right now they always are\n build_config[\"lexical_terms\"][\"show\"] = True\n\n def _handle_database_selection(self, build_config: dict, field_value: str) -> dict:\n \"\"\"Handle database selection and update related configurations.\"\"\"\n build_config = self.reset_database_list(build_config)\n\n # Reset collection list if database selection changes\n if field_value not in build_config[\"database_name\"][\"options\"]:\n build_config[\"database_name\"][\"value\"] = \"\"\n return build_config\n\n # Get the api endpoint for the selected database\n index = build_config[\"database_name\"][\"options\"].index(field_value)\n build_config[\"api_endpoint\"][\"value\"] = build_config[\"database_name\"][\"options_metadata\"][index][\"api_endpoint\"]\n\n # Get the org_id for the selected database\n org_id = build_config[\"database_name\"][\"options_metadata\"][index][\"org_id\"]\n if not org_id:\n return build_config\n\n # Update the list of keyspaces based on the db info\n build_config[\"keyspace\"][\"options\"] = 
build_config[\"database_name\"][\"options_metadata\"][index][\"keyspaces\"]\n build_config[\"keyspace\"][\"value\"] = (\n build_config[\"keyspace\"][\"options\"] and build_config[\"keyspace\"][\"options\"][0]\n if build_config[\"keyspace\"][\"value\"] not in build_config[\"keyspace\"][\"options\"]\n else build_config[\"keyspace\"][\"value\"]\n )\n\n # Get the database id for the selected database\n db_id = self.get_database_id_static(api_endpoint=build_config[\"api_endpoint\"][\"value\"])\n keyspace = self.get_keyspace()\n\n # Update the helper text for the embedding provider field\n template = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"]\n template[\"02_embedding_generation_provider\"][\"helper_text\"] = (\n \"To create collections with more embedding provider options, go to \"\n f''\n \"your database in Astra DB.\"\n )\n\n # Reset provider options\n build_config = self.reset_provider_options(build_config)\n\n # Handle hybrid search options\n build_config = self._handle_hybrid_search_options(build_config)\n\n return self.reset_collection_list(build_config)\n\n def _handle_collection_selection(self, build_config: dict, field_value: str) -> dict:\n \"\"\"Handle collection selection and update embedding options.\"\"\"\n build_config[\"autodetect_collection\"][\"value\"] = True\n build_config = self.reset_collection_list(build_config)\n\n # Reset embedding model if collection selection changes\n if field_value and field_value not in build_config[\"collection_name\"][\"options\"]:\n build_config[\"collection_name\"][\"options\"].append(field_value)\n build_config[\"collection_name\"][\"options_metadata\"].append(\n {\n \"records\": 0,\n \"provider\": None,\n \"icon\": \"vectorstores\",\n \"model\": None,\n }\n )\n build_config[\"autodetect_collection\"][\"value\"] = False\n\n if not field_value:\n return build_config\n\n # Get the selected collection index\n index = build_config[\"collection_name\"][\"options\"].index(field_value)\n\n # Set the provider of the selected collection\n provider = build_config[\"collection_name\"][\"options_metadata\"][index][\"provider\"]\n build_config[\"embedding_model\"][\"show\"] = not bool(provider)\n build_config[\"embedding_model\"][\"required\"] = not bool(provider)\n\n # Grab the collection object\n database = self.get_database_object(api_endpoint=build_config[\"api_endpoint\"][\"value\"])\n collection = database.get_collection(\n name=field_value,\n keyspace=build_config[\"keyspace\"][\"value\"],\n )\n\n # Check if hybrid and lexical are enabled\n col_options = collection.options()\n hyb_enabled = col_options.rerank and col_options.rerank.enabled\n lex_enabled = col_options.lexical and col_options.lexical.enabled\n user_hyb_enabled = build_config[\"search_method\"][\"value\"] == \"Hybrid Search\"\n\n # Show lexical terms if the collection is hybrid enabled\n build_config[\"lexical_terms\"][\"show\"] = hyb_enabled and lex_enabled and user_hyb_enabled\n\n return build_config\n\n @check_cached_vector_store\n def build_vector_store(self):\n try:\n from langchain_astradb import AstraDBVectorStore\n except ImportError as e:\n msg = (\n \"Could not import langchain Astra DB integration package. 
\"\n \"Please install it with `pip install langchain-astradb`.\"\n )\n raise ImportError(msg) from e\n\n # Get the embedding model and additional params\n embedding_params = {\"embedding\": self.embedding_model} if self.embedding_model else {}\n\n # Get the additional parameters\n additional_params = self.astradb_vectorstore_kwargs or {}\n\n # Get Langflow version and platform information\n __version__ = get_version_info()[\"version\"]\n langflow_prefix = \"\"\n # if os.getenv(\"AWS_EXECUTION_ENV\") == \"AWS_ECS_FARGATE\": # TODO: More precise way of detecting\n # langflow_prefix = \"ds-\"\n\n # Get the database object\n database = self.get_database_object()\n autodetect = self.collection_name in database.list_collection_names() and self.autodetect_collection\n\n # Bundle up the auto-detect parameters\n autodetect_params = {\n \"autodetect_collection\": autodetect,\n \"content_field\": (\n self.content_field\n if self.content_field and embedding_params\n else (\n \"page_content\"\n if embedding_params\n and self.collection_data(collection_name=self.collection_name, database=database) == 0\n else None\n )\n ),\n \"ignore_invalid_documents\": self.ignore_invalid_documents,\n }\n\n # Choose HybridSearchMode based on the selected param\n hybrid_search_mode = HybridSearchMode.DEFAULT if self.search_method == \"Hybrid Search\" else HybridSearchMode.OFF\n\n # Attempt to build the Vector Store object\n try:\n vector_store = AstraDBVectorStore(\n # Astra DB Authentication Parameters\n token=self.token,\n api_endpoint=database.api_endpoint,\n namespace=database.keyspace,\n collection_name=self.collection_name,\n environment=self.environment,\n # Hybrid Search Parameters\n hybrid_search=hybrid_search_mode,\n # Astra DB Usage Tracking Parameters\n ext_callers=[(f\"{langflow_prefix}langflow\", __version__)],\n # Astra DB Vector Store Parameters\n **autodetect_params,\n **embedding_params,\n **additional_params,\n )\n except Exception as e:\n msg = f\"Error initializing AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n\n # Add documents to the vector store\n self._add_documents_to_vector_store(vector_store)\n\n return vector_store\n\n def _add_documents_to_vector_store(self, vector_store) -> None:\n self.ingest_data = self._prepare_ingest_data()\n\n documents = []\n for _input in self.ingest_data or []:\n if isinstance(_input, Data):\n documents.append(_input.to_lc_document())\n else:\n msg = \"Vector Store Inputs must be Data objects.\"\n raise TypeError(msg)\n\n if documents and self.deletion_field:\n self.log(f\"Deleting documents where {self.deletion_field}\")\n try:\n database = self.get_database_object()\n collection = database.get_collection(self.collection_name, keyspace=database.keyspace)\n delete_values = list({doc.metadata[self.deletion_field] for doc in documents})\n self.log(f\"Deleting documents where {self.deletion_field} matches {delete_values}.\")\n collection.delete_many({f\"metadata.{self.deletion_field}\": {\"$in\": delete_values}})\n except Exception as e:\n msg = f\"Error deleting documents from AstraDBVectorStore based on '{self.deletion_field}': {e}\"\n raise ValueError(msg) from e\n\n if documents:\n self.log(f\"Adding {len(documents)} documents to the Vector Store.\")\n try:\n vector_store.add_documents(documents)\n except Exception as e:\n msg = f\"Error adding documents to AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n else:\n self.log(\"No documents to add to the Vector Store.\")\n\n def _map_search_type(self) -> str:\n search_type_mapping = {\n 
\"Similarity with score threshold\": \"similarity_score_threshold\",\n \"MMR (Max Marginal Relevance)\": \"mmr\",\n }\n\n return search_type_mapping.get(self.search_type, \"similarity\")\n\n def _build_search_args(self):\n # Clean up the search query\n query = self.search_query if isinstance(self.search_query, str) and self.search_query.strip() else None\n lexical_terms = self.lexical_terms or None\n\n # Check if we have a search query, and if so set the args\n if query:\n args = {\n \"query\": query,\n \"search_type\": self._map_search_type(),\n \"k\": self.number_of_results,\n \"score_threshold\": self.search_score_threshold,\n \"lexical_query\": lexical_terms,\n }\n elif self.advanced_search_filter:\n args = {\n \"n\": self.number_of_results,\n }\n else:\n return {}\n\n filter_arg = self.advanced_search_filter or {}\n if filter_arg:\n args[\"filter\"] = filter_arg\n\n return args\n\n def search_documents(self, vector_store=None) -> list[Data]:\n vector_store = vector_store or self.build_vector_store()\n\n self.log(f\"Search input: {self.search_query}\")\n self.log(f\"Search type: {self.search_type}\")\n self.log(f\"Number of results: {self.number_of_results}\")\n self.log(f\"store.hybrid_search: {vector_store.hybrid_search}\")\n self.log(f\"Lexical terms: {self.lexical_terms}\")\n self.log(f\"Reranker: {self.reranker}\")\n\n try:\n search_args = self._build_search_args()\n except Exception as e:\n msg = f\"Error in AstraDBVectorStore._build_search_args: {e}\"\n raise ValueError(msg) from e\n\n if not search_args:\n self.log(\"No search input or filters provided. Skipping search.\")\n return []\n\n docs = []\n search_method = \"search\" if \"query\" in search_args else \"metadata_search\"\n\n try:\n self.log(f\"Calling vector_store.{search_method} with args: {search_args}\")\n docs = getattr(vector_store, search_method)(**search_args)\n except Exception as e:\n msg = f\"Error performing {search_method} in AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n\n self.log(f\"Retrieved documents: {len(docs)}\")\n\n data = docs_to_data(docs)\n self.log(f\"Converted documents to data: {len(data)}\")\n self.status = data\n\n return data\n\n def get_retriever_kwargs(self):\n search_args = self._build_search_args()\n\n return {\n \"search_type\": self._map_search_type(),\n \"search_kwargs\": search_args,\n }\n"
},
"collection_name": {
"_input_type": "DropdownInput",
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Youtube Analysis.json b/src/backend/base/langflow/initial_setup/starter_projects/Youtube Analysis.json
index f2103ba56..42d08d9dc 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Youtube Analysis.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Youtube Analysis.json
@@ -377,7 +377,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, cast\n\nimport toml # type: ignore[import-untyped]\nfrom loguru import logger\n\nfrom langflow.custom import Component\nfrom langflow.io import BoolInput, DataFrameInput, HandleInput, MessageTextInput, MultilineInput, Output\nfrom langflow.schema import DataFrame\n\nif TYPE_CHECKING:\n from langchain_core.runnables import Runnable\n\n\nclass BatchRunComponent(Component):\n display_name = \"Batch Run\"\n description = \"Runs an LLM on each row of a DataFrame column. If no column is specified, all columns are used.\"\n icon = \"List\"\n\n inputs = [\n HandleInput(\n name=\"model\",\n display_name=\"Language Model\",\n info=\"Connect the 'Language Model' output from your LLM component here.\",\n input_types=[\"LanguageModel\"],\n required=True,\n ),\n MultilineInput(\n name=\"system_message\",\n display_name=\"Instructions\",\n info=\"Multi-line system instruction for all rows in the DataFrame.\",\n required=False,\n ),\n DataFrameInput(\n name=\"df\",\n display_name=\"DataFrame\",\n info=\"The DataFrame whose column (specified by 'column_name') we'll treat as text messages.\",\n required=True,\n ),\n MessageTextInput(\n name=\"column_name\",\n display_name=\"Column Name\",\n info=(\n \"The name of the DataFrame column to treat as text messages. \"\n \"If empty, all columns will be formatted in TOML.\"\n ),\n required=False,\n advanced=False,\n ),\n MessageTextInput(\n name=\"output_column_name\",\n display_name=\"Output Column Name\",\n info=\"Name of the column where the model's response will be stored.\",\n value=\"model_response\",\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"enable_metadata\",\n display_name=\"Enable Metadata\",\n info=\"If True, add metadata to the output DataFrame.\",\n value=False,\n required=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"LLM Results\",\n name=\"batch_results\",\n method=\"run_batch\",\n info=\"A DataFrame with all original columns plus the model's response column.\",\n ),\n ]\n\n def _format_row_as_toml(self, row: dict[str, Any]) -> str:\n \"\"\"Convert a dictionary (row) into a TOML-formatted string.\"\"\"\n formatted_dict = {str(col): {\"value\": str(val)} for col, val in row.items()}\n return toml.dumps(formatted_dict)\n\n def _create_base_row(\n self, original_row: dict[str, Any], model_response: str = \"\", batch_index: int = -1\n ) -> dict[str, Any]:\n \"\"\"Create a base row with original columns and additional metadata.\"\"\"\n row = original_row.copy()\n row[self.output_column_name] = model_response\n row[\"batch_index\"] = batch_index\n return row\n\n def _add_metadata(\n self, row: dict[str, Any], *, success: bool = True, system_msg: str = \"\", error: str | None = None\n ) -> None:\n \"\"\"Add metadata to a row if enabled.\"\"\"\n if not self.enable_metadata:\n return\n\n if success:\n row[\"metadata\"] = {\n \"has_system_message\": bool(system_msg),\n \"input_length\": len(row.get(\"text_input\", \"\")),\n \"response_length\": len(row[self.output_column_name]),\n \"processing_status\": \"success\",\n }\n else:\n row[\"metadata\"] = {\n \"error\": error,\n \"processing_status\": \"failed\",\n }\n\n async def run_batch(self) -> DataFrame:\n \"\"\"Process each row in df[column_name] with the language model asynchronously.\n\n Returns:\n DataFrame: A new DataFrame containing:\n - All original columns\n - The model's response column (customizable name)\n - 'batch_index' column for processing order\n - 
'metadata' (optional)\n\n Raises:\n ValueError: If the specified column is not found in the DataFrame\n TypeError: If the model is not compatible or input types are wrong\n \"\"\"\n model: Runnable = self.model\n system_msg = self.system_message or \"\"\n df: DataFrame = self.df\n col_name = self.column_name or \"\"\n\n # Validate inputs first\n if not isinstance(df, DataFrame):\n msg = f\"Expected DataFrame input, got {type(df)}\"\n raise TypeError(msg)\n\n if col_name and col_name not in df.columns:\n msg = f\"Column '{col_name}' not found in the DataFrame. Available columns: {', '.join(df.columns)}\"\n raise ValueError(msg)\n\n try:\n # Determine text input for each row\n if col_name:\n user_texts = df[col_name].astype(str).tolist()\n else:\n user_texts = [\n self._format_row_as_toml(cast(dict[str, Any], row)) for row in df.to_dict(orient=\"records\")\n ]\n\n total_rows = len(user_texts)\n logger.info(f\"Processing {total_rows} rows with batch run\")\n\n # Prepare the batch of conversations\n conversations = [\n [{\"role\": \"system\", \"content\": system_msg}, {\"role\": \"user\", \"content\": text}]\n if system_msg\n else [{\"role\": \"user\", \"content\": text}]\n for text in user_texts\n ]\n\n # Configure the model with project info and callbacks\n model = model.with_config(\n {\n \"run_name\": self.display_name,\n \"project_name\": self.get_project_name(),\n \"callbacks\": self.get_langchain_callbacks(),\n }\n )\n # Process batches and track progress\n responses_with_idx = list(\n zip(\n range(len(conversations)),\n await model.abatch(list(conversations)),\n strict=True,\n )\n )\n\n # Sort by index to maintain order\n responses_with_idx.sort(key=lambda x: x[0])\n\n # Build the final data with enhanced metadata\n rows: list[dict[str, Any]] = []\n for idx, (original_row, response) in enumerate(\n zip(df.to_dict(orient=\"records\"), responses_with_idx, strict=False)\n ):\n response_text = response[1].content if hasattr(response[1], \"content\") else str(response[1])\n row = self._create_base_row(\n cast(dict[str, Any], original_row), model_response=response_text, batch_index=idx\n )\n self._add_metadata(row, success=True, system_msg=system_msg)\n rows.append(row)\n\n # Log progress\n if (idx + 1) % max(1, total_rows // 10) == 0:\n logger.info(f\"Processed {idx + 1}/{total_rows} rows\")\n\n logger.info(\"Batch processing completed successfully\")\n return DataFrame(rows)\n\n except (KeyError, AttributeError) as e:\n # Handle data structure and attribute access errors\n logger.error(f\"Data processing error: {e!s}\")\n error_row = self._create_base_row({col: \"\" for col in df.columns}, model_response=\"\", batch_index=-1)\n self._add_metadata(error_row, success=False, error=str(e))\n return DataFrame([error_row])\n"
+ "value": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, cast\n\nimport toml # type: ignore[import-untyped]\nfrom loguru import logger\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.io import BoolInput, DataFrameInput, HandleInput, MessageTextInput, MultilineInput, Output\nfrom langflow.schema.dataframe import DataFrame\n\nif TYPE_CHECKING:\n from langchain_core.runnables import Runnable\n\n\nclass BatchRunComponent(Component):\n display_name = \"Batch Run\"\n description = \"Runs an LLM on each row of a DataFrame column. If no column is specified, all columns are used.\"\n icon = \"List\"\n\n inputs = [\n HandleInput(\n name=\"model\",\n display_name=\"Language Model\",\n info=\"Connect the 'Language Model' output from your LLM component here.\",\n input_types=[\"LanguageModel\"],\n required=True,\n ),\n MultilineInput(\n name=\"system_message\",\n display_name=\"Instructions\",\n info=\"Multi-line system instruction for all rows in the DataFrame.\",\n required=False,\n ),\n DataFrameInput(\n name=\"df\",\n display_name=\"DataFrame\",\n info=\"The DataFrame whose column (specified by 'column_name') we'll treat as text messages.\",\n required=True,\n ),\n MessageTextInput(\n name=\"column_name\",\n display_name=\"Column Name\",\n info=(\n \"The name of the DataFrame column to treat as text messages. \"\n \"If empty, all columns will be formatted in TOML.\"\n ),\n required=False,\n advanced=False,\n ),\n MessageTextInput(\n name=\"output_column_name\",\n display_name=\"Output Column Name\",\n info=\"Name of the column where the model's response will be stored.\",\n value=\"model_response\",\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"enable_metadata\",\n display_name=\"Enable Metadata\",\n info=\"If True, add metadata to the output DataFrame.\",\n value=False,\n required=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"LLM Results\",\n name=\"batch_results\",\n method=\"run_batch\",\n info=\"A DataFrame with all original columns plus the model's response column.\",\n ),\n ]\n\n def _format_row_as_toml(self, row: dict[str, Any]) -> str:\n \"\"\"Convert a dictionary (row) into a TOML-formatted string.\"\"\"\n formatted_dict = {str(col): {\"value\": str(val)} for col, val in row.items()}\n return toml.dumps(formatted_dict)\n\n def _create_base_row(\n self, original_row: dict[str, Any], model_response: str = \"\", batch_index: int = -1\n ) -> dict[str, Any]:\n \"\"\"Create a base row with original columns and additional metadata.\"\"\"\n row = original_row.copy()\n row[self.output_column_name] = model_response\n row[\"batch_index\"] = batch_index\n return row\n\n def _add_metadata(\n self, row: dict[str, Any], *, success: bool = True, system_msg: str = \"\", error: str | None = None\n ) -> None:\n \"\"\"Add metadata to a row if enabled.\"\"\"\n if not self.enable_metadata:\n return\n\n if success:\n row[\"metadata\"] = {\n \"has_system_message\": bool(system_msg),\n \"input_length\": len(row.get(\"text_input\", \"\")),\n \"response_length\": len(row[self.output_column_name]),\n \"processing_status\": \"success\",\n }\n else:\n row[\"metadata\"] = {\n \"error\": error,\n \"processing_status\": \"failed\",\n }\n\n async def run_batch(self) -> DataFrame:\n \"\"\"Process each row in df[column_name] with the language model asynchronously.\n\n Returns:\n DataFrame: A new DataFrame containing:\n - All original columns\n - The model's response column (customizable name)\n - 'batch_index' 
column for processing order\n - 'metadata' (optional)\n\n Raises:\n ValueError: If the specified column is not found in the DataFrame\n TypeError: If the model is not compatible or input types are wrong\n \"\"\"\n model: Runnable = self.model\n system_msg = self.system_message or \"\"\n df: DataFrame = self.df\n col_name = self.column_name or \"\"\n\n # Validate inputs first\n if not isinstance(df, DataFrame):\n msg = f\"Expected DataFrame input, got {type(df)}\"\n raise TypeError(msg)\n\n if col_name and col_name not in df.columns:\n msg = f\"Column '{col_name}' not found in the DataFrame. Available columns: {', '.join(df.columns)}\"\n raise ValueError(msg)\n\n try:\n # Determine text input for each row\n if col_name:\n user_texts = df[col_name].astype(str).tolist()\n else:\n user_texts = [\n self._format_row_as_toml(cast(dict[str, Any], row)) for row in df.to_dict(orient=\"records\")\n ]\n\n total_rows = len(user_texts)\n logger.info(f\"Processing {total_rows} rows with batch run\")\n\n # Prepare the batch of conversations\n conversations = [\n [{\"role\": \"system\", \"content\": system_msg}, {\"role\": \"user\", \"content\": text}]\n if system_msg\n else [{\"role\": \"user\", \"content\": text}]\n for text in user_texts\n ]\n\n # Configure the model with project info and callbacks\n model = model.with_config(\n {\n \"run_name\": self.display_name,\n \"project_name\": self.get_project_name(),\n \"callbacks\": self.get_langchain_callbacks(),\n }\n )\n # Process batches and track progress\n responses_with_idx = list(\n zip(\n range(len(conversations)),\n await model.abatch(list(conversations)),\n strict=True,\n )\n )\n\n # Sort by index to maintain order\n responses_with_idx.sort(key=lambda x: x[0])\n\n # Build the final data with enhanced metadata\n rows: list[dict[str, Any]] = []\n for idx, (original_row, response) in enumerate(\n zip(df.to_dict(orient=\"records\"), responses_with_idx, strict=False)\n ):\n response_text = response[1].content if hasattr(response[1], \"content\") else str(response[1])\n row = self._create_base_row(\n cast(dict[str, Any], original_row), model_response=response_text, batch_index=idx\n )\n self._add_metadata(row, success=True, system_msg=system_msg)\n rows.append(row)\n\n # Log progress\n if (idx + 1) % max(1, total_rows // 10) == 0:\n logger.info(f\"Processed {idx + 1}/{total_rows} rows\")\n\n logger.info(\"Batch processing completed successfully\")\n return DataFrame(rows)\n\n except (KeyError, AttributeError) as e:\n # Handle data structure and attribute access errors\n logger.error(f\"Data processing error: {e!s}\")\n error_row = self._create_base_row({col: \"\" for col in df.columns}, model_response=\"\", batch_index=-1)\n self._add_metadata(error_row, success=False, error=str(e))\n return DataFrame([error_row])\n"
},
"column_name": {
"_input_type": "StrInput",
@@ -608,7 +608,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from contextlib import contextmanager\n\nimport pandas as pd\nfrom googleapiclient.discovery import build\nfrom googleapiclient.errors import HttpError\n\nfrom langflow.custom import Component\nfrom langflow.inputs import BoolInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput\nfrom langflow.schema import DataFrame\nfrom langflow.template import Output\n\n\nclass YouTubeCommentsComponent(Component):\n \"\"\"A component that retrieves comments from YouTube videos.\"\"\"\n\n display_name: str = \"YouTube Comments\"\n description: str = \"Retrieves and analyzes comments from YouTube videos.\"\n icon: str = \"YouTube\"\n\n # Constants\n COMMENTS_DISABLED_STATUS = 403\n NOT_FOUND_STATUS = 404\n API_MAX_RESULTS = 100\n\n inputs = [\n MessageTextInput(\n name=\"video_url\",\n display_name=\"Video URL\",\n info=\"The URL of the YouTube video to get comments from.\",\n tool_mode=True,\n required=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"YouTube API Key\",\n info=\"Your YouTube Data API key.\",\n required=True,\n ),\n IntInput(\n name=\"max_results\",\n display_name=\"Max Results\",\n value=20,\n info=\"The maximum number of comments to return.\",\n ),\n DropdownInput(\n name=\"sort_by\",\n display_name=\"Sort By\",\n options=[\"time\", \"relevance\"],\n value=\"relevance\",\n info=\"Sort comments by time or relevance.\",\n ),\n BoolInput(\n name=\"include_replies\",\n display_name=\"Include Replies\",\n value=False,\n info=\"Whether to include replies to comments.\",\n advanced=True,\n ),\n BoolInput(\n name=\"include_metrics\",\n display_name=\"Include Metrics\",\n value=True,\n info=\"Include metrics like like count and reply count.\",\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(name=\"comments\", display_name=\"Comments\", method=\"get_video_comments\"),\n ]\n\n def _extract_video_id(self, video_url: str) -> str:\n \"\"\"Extracts the video ID from a YouTube URL.\"\"\"\n import re\n\n patterns = [\n r\"(?:youtube\\.com\\/watch\\?v=|youtu.be\\/|youtube.com\\/embed\\/)([^&\\n?#]+)\",\n r\"youtube.com\\/shorts\\/([^&\\n?#]+)\",\n ]\n\n for pattern in patterns:\n match = re.search(pattern, video_url)\n if match:\n return match.group(1)\n\n return video_url.strip()\n\n def _process_reply(self, reply: dict, parent_id: str, *, include_metrics: bool = True) -> dict:\n \"\"\"Process a single reply comment.\"\"\"\n reply_snippet = reply[\"snippet\"]\n reply_data = {\n \"comment_id\": reply[\"id\"],\n \"parent_comment_id\": parent_id,\n \"author\": reply_snippet[\"authorDisplayName\"],\n \"text\": reply_snippet[\"textDisplay\"],\n \"published_at\": reply_snippet[\"publishedAt\"],\n \"is_reply\": True,\n }\n if include_metrics:\n reply_data[\"like_count\"] = reply_snippet[\"likeCount\"]\n reply_data[\"reply_count\"] = 0 # Replies can't have replies\n\n return reply_data\n\n def _process_comment(\n self, item: dict, *, include_metrics: bool = True, include_replies: bool = False\n ) -> list[dict]:\n \"\"\"Process a single comment thread.\"\"\"\n comment = item[\"snippet\"][\"topLevelComment\"][\"snippet\"]\n comment_id = item[\"snippet\"][\"topLevelComment\"][\"id\"]\n\n # Basic comment data\n processed_comments = [\n {\n \"comment_id\": comment_id,\n \"parent_comment_id\": \"\", # Empty for top-level comments\n \"author\": comment[\"authorDisplayName\"],\n \"author_channel_url\": comment.get(\"authorChannelUrl\", \"\"),\n \"text\": comment[\"textDisplay\"],\n \"published_at\": comment[\"publishedAt\"],\n \"updated_at\": comment[\"updatedAt\"],\n 
\"is_reply\": False,\n }\n ]\n\n # Add metrics if requested\n if include_metrics:\n processed_comments[0].update(\n {\n \"like_count\": comment[\"likeCount\"],\n \"reply_count\": item[\"snippet\"][\"totalReplyCount\"],\n }\n )\n\n # Add replies if requested\n if include_replies and item[\"snippet\"][\"totalReplyCount\"] > 0 and \"replies\" in item:\n for reply in item[\"replies\"][\"comments\"]:\n reply_data = self._process_reply(reply, parent_id=comment_id, include_metrics=include_metrics)\n processed_comments.append(reply_data)\n\n return processed_comments\n\n @contextmanager\n def youtube_client(self):\n \"\"\"Context manager for YouTube API client.\"\"\"\n client = build(\"youtube\", \"v3\", developerKey=self.api_key)\n try:\n yield client\n finally:\n client.close()\n\n def get_video_comments(self) -> DataFrame:\n \"\"\"Retrieves comments from a YouTube video and returns as DataFrame.\"\"\"\n try:\n # Extract video ID from URL\n video_id = self._extract_video_id(self.video_url)\n\n # Use context manager for YouTube API client\n with self.youtube_client() as youtube:\n comments_data = []\n results_count = 0\n request = youtube.commentThreads().list(\n part=\"snippet,replies\",\n videoId=video_id,\n maxResults=min(self.API_MAX_RESULTS, self.max_results),\n order=self.sort_by,\n textFormat=\"plainText\",\n )\n\n while request and results_count < self.max_results:\n response = request.execute()\n\n for item in response.get(\"items\", []):\n if results_count >= self.max_results:\n break\n\n comments = self._process_comment(\n item, include_metrics=self.include_metrics, include_replies=self.include_replies\n )\n comments_data.extend(comments)\n results_count += 1\n\n # Get the next page if available and needed\n if \"nextPageToken\" in response and results_count < self.max_results:\n request = youtube.commentThreads().list(\n part=\"snippet,replies\",\n videoId=video_id,\n maxResults=min(self.API_MAX_RESULTS, self.max_results - results_count),\n order=self.sort_by,\n textFormat=\"plainText\",\n pageToken=response[\"nextPageToken\"],\n )\n else:\n request = None\n\n # Convert to DataFrame\n comments_df = pd.DataFrame(comments_data)\n\n # Add video metadata\n comments_df[\"video_id\"] = video_id\n comments_df[\"video_url\"] = self.video_url\n\n # Sort columns for better organization\n column_order = [\n \"video_id\",\n \"video_url\",\n \"comment_id\",\n \"parent_comment_id\",\n \"is_reply\",\n \"author\",\n \"author_channel_url\",\n \"text\",\n \"published_at\",\n \"updated_at\",\n ]\n\n if self.include_metrics:\n column_order.extend([\"like_count\", \"reply_count\"])\n\n comments_df = comments_df[column_order]\n\n return DataFrame(comments_df)\n\n except HttpError as e:\n error_message = f\"YouTube API error: {e!s}\"\n if e.resp.status == self.COMMENTS_DISABLED_STATUS:\n error_message = \"Comments are disabled for this video or API quota exceeded.\"\n elif e.resp.status == self.NOT_FOUND_STATUS:\n error_message = \"Video not found.\"\n\n return DataFrame(pd.DataFrame({\"error\": [error_message]}))\n"
+ "value": "from contextlib import contextmanager\n\nimport pandas as pd\nfrom googleapiclient.discovery import build\nfrom googleapiclient.errors import HttpError\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.template.field.base import Output\n\n\nclass YouTubeCommentsComponent(Component):\n \"\"\"A component that retrieves comments from YouTube videos.\"\"\"\n\n display_name: str = \"YouTube Comments\"\n description: str = \"Retrieves and analyzes comments from YouTube videos.\"\n icon: str = \"YouTube\"\n\n # Constants\n COMMENTS_DISABLED_STATUS = 403\n NOT_FOUND_STATUS = 404\n API_MAX_RESULTS = 100\n\n inputs = [\n MessageTextInput(\n name=\"video_url\",\n display_name=\"Video URL\",\n info=\"The URL of the YouTube video to get comments from.\",\n tool_mode=True,\n required=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"YouTube API Key\",\n info=\"Your YouTube Data API key.\",\n required=True,\n ),\n IntInput(\n name=\"max_results\",\n display_name=\"Max Results\",\n value=20,\n info=\"The maximum number of comments to return.\",\n ),\n DropdownInput(\n name=\"sort_by\",\n display_name=\"Sort By\",\n options=[\"time\", \"relevance\"],\n value=\"relevance\",\n info=\"Sort comments by time or relevance.\",\n ),\n BoolInput(\n name=\"include_replies\",\n display_name=\"Include Replies\",\n value=False,\n info=\"Whether to include replies to comments.\",\n advanced=True,\n ),\n BoolInput(\n name=\"include_metrics\",\n display_name=\"Include Metrics\",\n value=True,\n info=\"Include metrics like like count and reply count.\",\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(name=\"comments\", display_name=\"Comments\", method=\"get_video_comments\"),\n ]\n\n def _extract_video_id(self, video_url: str) -> str:\n \"\"\"Extracts the video ID from a YouTube URL.\"\"\"\n import re\n\n patterns = [\n r\"(?:youtube\\.com\\/watch\\?v=|youtu.be\\/|youtube.com\\/embed\\/)([^&\\n?#]+)\",\n r\"youtube.com\\/shorts\\/([^&\\n?#]+)\",\n ]\n\n for pattern in patterns:\n match = re.search(pattern, video_url)\n if match:\n return match.group(1)\n\n return video_url.strip()\n\n def _process_reply(self, reply: dict, parent_id: str, *, include_metrics: bool = True) -> dict:\n \"\"\"Process a single reply comment.\"\"\"\n reply_snippet = reply[\"snippet\"]\n reply_data = {\n \"comment_id\": reply[\"id\"],\n \"parent_comment_id\": parent_id,\n \"author\": reply_snippet[\"authorDisplayName\"],\n \"text\": reply_snippet[\"textDisplay\"],\n \"published_at\": reply_snippet[\"publishedAt\"],\n \"is_reply\": True,\n }\n if include_metrics:\n reply_data[\"like_count\"] = reply_snippet[\"likeCount\"]\n reply_data[\"reply_count\"] = 0 # Replies can't have replies\n\n return reply_data\n\n def _process_comment(\n self, item: dict, *, include_metrics: bool = True, include_replies: bool = False\n ) -> list[dict]:\n \"\"\"Process a single comment thread.\"\"\"\n comment = item[\"snippet\"][\"topLevelComment\"][\"snippet\"]\n comment_id = item[\"snippet\"][\"topLevelComment\"][\"id\"]\n\n # Basic comment data\n processed_comments = [\n {\n \"comment_id\": comment_id,\n \"parent_comment_id\": \"\", # Empty for top-level comments\n \"author\": comment[\"authorDisplayName\"],\n \"author_channel_url\": comment.get(\"authorChannelUrl\", \"\"),\n \"text\": comment[\"textDisplay\"],\n \"published_at\": 
comment[\"publishedAt\"],\n \"updated_at\": comment[\"updatedAt\"],\n \"is_reply\": False,\n }\n ]\n\n # Add metrics if requested\n if include_metrics:\n processed_comments[0].update(\n {\n \"like_count\": comment[\"likeCount\"],\n \"reply_count\": item[\"snippet\"][\"totalReplyCount\"],\n }\n )\n\n # Add replies if requested\n if include_replies and item[\"snippet\"][\"totalReplyCount\"] > 0 and \"replies\" in item:\n for reply in item[\"replies\"][\"comments\"]:\n reply_data = self._process_reply(reply, parent_id=comment_id, include_metrics=include_metrics)\n processed_comments.append(reply_data)\n\n return processed_comments\n\n @contextmanager\n def youtube_client(self):\n \"\"\"Context manager for YouTube API client.\"\"\"\n client = build(\"youtube\", \"v3\", developerKey=self.api_key)\n try:\n yield client\n finally:\n client.close()\n\n def get_video_comments(self) -> DataFrame:\n \"\"\"Retrieves comments from a YouTube video and returns as DataFrame.\"\"\"\n try:\n # Extract video ID from URL\n video_id = self._extract_video_id(self.video_url)\n\n # Use context manager for YouTube API client\n with self.youtube_client() as youtube:\n comments_data = []\n results_count = 0\n request = youtube.commentThreads().list(\n part=\"snippet,replies\",\n videoId=video_id,\n maxResults=min(self.API_MAX_RESULTS, self.max_results),\n order=self.sort_by,\n textFormat=\"plainText\",\n )\n\n while request and results_count < self.max_results:\n response = request.execute()\n\n for item in response.get(\"items\", []):\n if results_count >= self.max_results:\n break\n\n comments = self._process_comment(\n item, include_metrics=self.include_metrics, include_replies=self.include_replies\n )\n comments_data.extend(comments)\n results_count += 1\n\n # Get the next page if available and needed\n if \"nextPageToken\" in response and results_count < self.max_results:\n request = youtube.commentThreads().list(\n part=\"snippet,replies\",\n videoId=video_id,\n maxResults=min(self.API_MAX_RESULTS, self.max_results - results_count),\n order=self.sort_by,\n textFormat=\"plainText\",\n pageToken=response[\"nextPageToken\"],\n )\n else:\n request = None\n\n # Convert to DataFrame\n comments_df = pd.DataFrame(comments_data)\n\n # Add video metadata\n comments_df[\"video_id\"] = video_id\n comments_df[\"video_url\"] = self.video_url\n\n # Sort columns for better organization\n column_order = [\n \"video_id\",\n \"video_url\",\n \"comment_id\",\n \"parent_comment_id\",\n \"is_reply\",\n \"author\",\n \"author_channel_url\",\n \"text\",\n \"published_at\",\n \"updated_at\",\n ]\n\n if self.include_metrics:\n column_order.extend([\"like_count\", \"reply_count\"])\n\n comments_df = comments_df[column_order]\n\n return DataFrame(comments_df)\n\n except HttpError as e:\n error_message = f\"YouTube API error: {e!s}\"\n if e.resp.status == self.COMMENTS_DISABLED_STATUS:\n error_message = \"Comments are disabled for this video or API quota exceeded.\"\n elif e.resp.status == self.NOT_FOUND_STATUS:\n error_message = \"Video not found.\"\n\n return DataFrame(pd.DataFrame({\"error\": [error_message]}))\n"
},
"include_metrics": {
"_input_type": "BoolInput",
@@ -844,7 +844,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
+ "value": "from typing import Any\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import (\n OPENAI_MODEL_NAMES,\n OPENAI_REASONING_MODEL_NAMES,\n)\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging import logger\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES + OPENAI_REASONING_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[1],\n combobox=True,\n real_time_refresh=True,\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n show=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n IntInput(\n name=\"max_retries\",\n display_name=\"Max Retries\",\n info=\"The maximum number of retries to make when generating.\",\n advanced=True,\n value=5,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"The timeout for requests to OpenAI completion API.\",\n advanced=True,\n value=700,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n parameters = {\n \"api_key\": SecretStr(self.api_key).get_secret_value() if self.api_key else None,\n \"model_name\": self.model_name,\n \"max_tokens\": self.max_tokens or None,\n \"model_kwargs\": self.model_kwargs or {},\n \"base_url\": self.openai_api_base or \"https://api.openai.com/v1\",\n \"seed\": self.seed,\n \"max_retries\": self.max_retries,\n \"timeout\": self.timeout,\n \"temperature\": self.temperature if self.temperature is not None else 0.1,\n }\n\n logger.info(f\"Model name: {self.model_name}\")\n if self.model_name in OPENAI_REASONING_MODEL_NAMES:\n logger.info(\"Getting reasoning model parameters\")\n parameters.pop(\"temperature\")\n parameters.pop(\"seed\")\n output = ChatOpenAI(**parameters)\n if self.json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def 
_get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_REASONING_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = False\n build_config[\"seed\"][\"show\"] = False\n if field_name in {\"base_url\", \"model_name\", \"api_key\"} and field_value in OPENAI_MODEL_NAMES:\n build_config[\"temperature\"][\"show\"] = True\n build_config[\"seed\"][\"show\"] = True\n return build_config\n"
},
"input_value": {
"_input_type": "MessageInput",
@@ -1298,7 +1298,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
+ "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.agents.events import ExceptionWithMessageError\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODELS_METADATA,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers.current_date import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())] + [{\"icon\": \"brain\"}],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n # Get LLM model and validate\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Validate tools\n if not self.tools:\n msg = \"Tools are required to run the agent. 
Please add at least one tool.\"\n raise ValueError(msg)\n\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n return await self.run_agent(agent)\n\n except (ValueError, TypeError, KeyError) as e:\n logger.error(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n logger.error(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error: {e!s}\")\n raise\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v is not None}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except Exception as e:\n logger.error(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n 
build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n options_metadata=[MODELS_METADATA[key] for key in sorted(MODELS_METADATA.keys())]\n + [{\"icon\": \"brain\"}],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\", tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, 
metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n"
},
"handle_parsing_errors": {
"_input_type": "BoolInput",
@@ -1926,7 +1926,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
+ "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"template": {
"_input_type": "PromptInput",
@@ -2144,7 +2144,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs import BoolInput\nfrom langflow.inputs.inputs import HandleInput\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
+ "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.template.field.base import Output\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n info=\"Whether to clean the data\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, 
got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n return \"\\n\".join([safe_convert(item, clean_data=self.clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n"
},
"data_template": {
"_input_type": "MessageTextInput",
@@ -2405,7 +2405,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "import pandas as pd\nimport youtube_transcript_api\nfrom langchain_community.document_loaders import YoutubeLoader\nfrom langchain_community.document_loaders.youtube import TranscriptFormat\n\nfrom langflow.custom import Component\nfrom langflow.inputs import DropdownInput, IntInput, MultilineInput\nfrom langflow.schema import Data, DataFrame, Message\nfrom langflow.template import Output\n\n\nclass YouTubeTranscriptsComponent(Component):\n \"\"\"A component that extracts spoken content from YouTube videos as transcripts.\"\"\"\n\n display_name: str = \"YouTube Transcripts\"\n description: str = \"Extracts spoken content from YouTube videos with multiple output options.\"\n icon: str = \"YouTube\"\n name = \"YouTubeTranscripts\"\n\n inputs = [\n MultilineInput(\n name=\"url\",\n display_name=\"Video URL\",\n info=\"Enter the YouTube video URL to get transcripts from.\",\n tool_mode=True,\n required=True,\n ),\n IntInput(\n name=\"chunk_size_seconds\",\n display_name=\"Chunk Size (seconds)\",\n value=60,\n info=\"The size of each transcript chunk in seconds.\",\n ),\n DropdownInput(\n name=\"translation\",\n display_name=\"Translation Language\",\n advanced=True,\n options=[\"\", \"en\", \"es\", \"fr\", \"de\", \"it\", \"pt\", \"ru\", \"ja\", \"ko\", \"hi\", \"ar\", \"id\"],\n info=\"Translate the transcripts to the specified language. Leave empty for no translation.\",\n ),\n ]\n\n outputs = [\n Output(name=\"dataframe\", display_name=\"Chunks\", method=\"get_dataframe_output\"),\n Output(name=\"message\", display_name=\"Transcript\", method=\"get_message_output\"),\n Output(name=\"data_output\", display_name=\"Transcript + Source\", method=\"get_data_output\"),\n ]\n\n def _load_transcripts(self, *, as_chunks: bool = True):\n \"\"\"Internal method to load transcripts from YouTube.\"\"\"\n loader = YoutubeLoader.from_youtube_url(\n self.url,\n transcript_format=TranscriptFormat.CHUNKS if as_chunks else TranscriptFormat.TEXT,\n chunk_size_seconds=self.chunk_size_seconds,\n translation=self.translation or None,\n )\n return loader.load()\n\n def get_dataframe_output(self) -> DataFrame:\n \"\"\"Provides transcript output as a DataFrame with timestamp and text columns.\"\"\"\n try:\n transcripts = self._load_transcripts(as_chunks=True)\n\n # Create DataFrame with timestamp and text columns\n data = []\n for doc in transcripts:\n start_seconds = int(doc.metadata[\"start_seconds\"])\n start_minutes = start_seconds // 60\n start_seconds %= 60\n timestamp = f\"{start_minutes:02d}:{start_seconds:02d}\"\n data.append({\"timestamp\": timestamp, \"text\": doc.page_content})\n\n return DataFrame(pd.DataFrame(data))\n\n except (youtube_transcript_api.TranscriptsDisabled, youtube_transcript_api.NoTranscriptFound) as exc:\n return DataFrame(pd.DataFrame({\"error\": [f\"Failed to get YouTube transcripts: {exc!s}\"]}))\n\n def get_message_output(self) -> Message:\n \"\"\"Provides transcript output as continuous text.\"\"\"\n try:\n transcripts = self._load_transcripts(as_chunks=False)\n result = transcripts[0].page_content\n return Message(text=result)\n\n except (youtube_transcript_api.TranscriptsDisabled, youtube_transcript_api.NoTranscriptFound) as exc:\n error_msg = f\"Failed to get YouTube transcripts: {exc!s}\"\n return Message(text=error_msg)\n\n def get_data_output(self) -> Data:\n \"\"\"Creates a structured data object with transcript and metadata.\n\n Returns a Data object containing transcript text, video URL, and any error\n messages that occurred during processing. 
The object includes:\n - 'transcript': continuous text from the entire video (concatenated if multiple parts)\n - 'video_url': the input YouTube URL\n - 'error': error message if an exception occurs\n \"\"\"\n default_data = {\"transcript\": \"\", \"video_url\": self.url, \"error\": None}\n\n try:\n transcripts = self._load_transcripts(as_chunks=False)\n if not transcripts:\n default_data[\"error\"] = \"No transcripts found.\"\n return Data(data=default_data)\n\n # Combine all transcript parts\n full_transcript = \" \".join(doc.page_content for doc in transcripts)\n return Data(data={\"transcript\": full_transcript, \"video_url\": self.url})\n\n except (\n youtube_transcript_api.TranscriptsDisabled,\n youtube_transcript_api.NoTranscriptFound,\n youtube_transcript_api.CouldNotRetrieveTranscript,\n ) as exc:\n default_data[\"error\"] = str(exc)\n return Data(data=default_data)\n"
+ "value": "import pandas as pd\nimport youtube_transcript_api\nfrom langchain_community.document_loaders import YoutubeLoader\nfrom langchain_community.document_loaders.youtube import TranscriptFormat\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DropdownInput, IntInput, MultilineInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.template.field.base import Output\n\n\nclass YouTubeTranscriptsComponent(Component):\n \"\"\"A component that extracts spoken content from YouTube videos as transcripts.\"\"\"\n\n display_name: str = \"YouTube Transcripts\"\n description: str = \"Extracts spoken content from YouTube videos with multiple output options.\"\n icon: str = \"YouTube\"\n name = \"YouTubeTranscripts\"\n\n inputs = [\n MultilineInput(\n name=\"url\",\n display_name=\"Video URL\",\n info=\"Enter the YouTube video URL to get transcripts from.\",\n tool_mode=True,\n required=True,\n ),\n IntInput(\n name=\"chunk_size_seconds\",\n display_name=\"Chunk Size (seconds)\",\n value=60,\n info=\"The size of each transcript chunk in seconds.\",\n ),\n DropdownInput(\n name=\"translation\",\n display_name=\"Translation Language\",\n advanced=True,\n options=[\"\", \"en\", \"es\", \"fr\", \"de\", \"it\", \"pt\", \"ru\", \"ja\", \"ko\", \"hi\", \"ar\", \"id\"],\n info=\"Translate the transcripts to the specified language. Leave empty for no translation.\",\n ),\n ]\n\n outputs = [\n Output(name=\"dataframe\", display_name=\"Chunks\", method=\"get_dataframe_output\"),\n Output(name=\"message\", display_name=\"Transcript\", method=\"get_message_output\"),\n Output(name=\"data_output\", display_name=\"Transcript + Source\", method=\"get_data_output\"),\n ]\n\n def _load_transcripts(self, *, as_chunks: bool = True):\n \"\"\"Internal method to load transcripts from YouTube.\"\"\"\n loader = YoutubeLoader.from_youtube_url(\n self.url,\n transcript_format=TranscriptFormat.CHUNKS if as_chunks else TranscriptFormat.TEXT,\n chunk_size_seconds=self.chunk_size_seconds,\n translation=self.translation or None,\n )\n return loader.load()\n\n def get_dataframe_output(self) -> DataFrame:\n \"\"\"Provides transcript output as a DataFrame with timestamp and text columns.\"\"\"\n try:\n transcripts = self._load_transcripts(as_chunks=True)\n\n # Create DataFrame with timestamp and text columns\n data = []\n for doc in transcripts:\n start_seconds = int(doc.metadata[\"start_seconds\"])\n start_minutes = start_seconds // 60\n start_seconds %= 60\n timestamp = f\"{start_minutes:02d}:{start_seconds:02d}\"\n data.append({\"timestamp\": timestamp, \"text\": doc.page_content})\n\n return DataFrame(pd.DataFrame(data))\n\n except (youtube_transcript_api.TranscriptsDisabled, youtube_transcript_api.NoTranscriptFound) as exc:\n return DataFrame(pd.DataFrame({\"error\": [f\"Failed to get YouTube transcripts: {exc!s}\"]}))\n\n def get_message_output(self) -> Message:\n \"\"\"Provides transcript output as continuous text.\"\"\"\n try:\n transcripts = self._load_transcripts(as_chunks=False)\n result = transcripts[0].page_content\n return Message(text=result)\n\n except (youtube_transcript_api.TranscriptsDisabled, youtube_transcript_api.NoTranscriptFound) as exc:\n error_msg = f\"Failed to get YouTube transcripts: {exc!s}\"\n return Message(text=error_msg)\n\n def get_data_output(self) -> Data:\n \"\"\"Creates a structured data object with transcript and metadata.\n\n Returns a 
Data object containing transcript text, video URL, and any error\n messages that occurred during processing. The object includes:\n - 'transcript': continuous text from the entire video (concatenated if multiple parts)\n - 'video_url': the input YouTube URL\n - 'error': error message if an exception occurs\n \"\"\"\n default_data = {\"transcript\": \"\", \"video_url\": self.url, \"error\": None}\n\n try:\n transcripts = self._load_transcripts(as_chunks=False)\n if not transcripts:\n default_data[\"error\"] = \"No transcripts found.\"\n return Data(data=default_data)\n\n # Combine all transcript parts\n full_transcript = \" \".join(doc.page_content for doc in transcripts)\n return Data(data={\"transcript\": full_transcript, \"video_url\": self.url})\n\n except (\n youtube_transcript_api.TranscriptsDisabled,\n youtube_transcript_api.NoTranscriptFound,\n youtube_transcript_api.CouldNotRetrieveTranscript,\n ) as exc:\n default_data[\"error\"] = str(exc)\n return Data(data=default_data)\n"
},
"tools_metadata": {
"_input_type": "TableInput",
@@ -2718,7 +2718,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
+ "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"_input_type": "FileInput",
@@ -3102,7 +3102,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "import re\n\nfrom langflow.custom import Component\nfrom langflow.io import BoolInput, DropdownInput, IntInput, MessageInput, MessageTextInput, Output\nfrom langflow.schema.message import Message\n\n\nclass ConditionalRouterComponent(Component):\n display_name = \"If-Else\"\n description = \"Routes an input message to a corresponding output based on text comparison.\"\n icon = \"split\"\n name = \"ConditionalRouter\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.__iteration_updated = False\n\n inputs = [\n MessageTextInput(\n name=\"input_text\",\n display_name=\"Text Input\",\n info=\"The primary text input for the operation.\",\n required=True,\n ),\n MessageTextInput(\n name=\"match_text\",\n display_name=\"Match Text\",\n info=\"The text input to compare against.\",\n required=True,\n ),\n DropdownInput(\n name=\"operator\",\n display_name=\"Operator\",\n options=[\"equals\", \"not equals\", \"contains\", \"starts with\", \"ends with\", \"regex\"],\n info=\"The operator to apply for comparing the texts.\",\n value=\"equals\",\n real_time_refresh=True,\n ),\n BoolInput(\n name=\"case_sensitive\",\n display_name=\"Case Sensitive\",\n info=\"If true, the comparison will be case sensitive.\",\n value=True,\n advanced=True,\n ),\n MessageInput(\n name=\"message\",\n display_name=\"Alternative Output\",\n info=\"The message to pass through either route.\",\n advanced=True,\n ),\n IntInput(\n name=\"max_iterations\",\n display_name=\"Max Iterations\",\n info=\"The maximum number of iterations for the conditional router.\",\n value=10,\n advanced=True,\n ),\n DropdownInput(\n name=\"default_route\",\n display_name=\"Default Route\",\n options=[\"true_result\", \"false_result\"],\n info=\"The default route to take when max iterations are reached.\",\n value=\"false_result\",\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"True\", name=\"true_result\", method=\"true_response\", group_outputs=True),\n Output(display_name=\"False\", name=\"false_result\", method=\"false_response\", group_outputs=True),\n ]\n\n def _pre_run_setup(self):\n self.__iteration_updated = False\n\n def evaluate_condition(self, input_text: str, match_text: str, operator: str, *, case_sensitive: bool) -> bool:\n if not case_sensitive and operator != \"regex\":\n input_text = input_text.lower()\n match_text = match_text.lower()\n\n if operator == \"equals\":\n return input_text == match_text\n if operator == \"not equals\":\n return input_text != match_text\n if operator == \"contains\":\n return match_text in input_text\n if operator == \"starts with\":\n return input_text.startswith(match_text)\n if operator == \"ends with\":\n return input_text.endswith(match_text)\n if operator == \"regex\":\n try:\n return bool(re.match(match_text, input_text))\n except re.error:\n return False # Return False if the regex is invalid\n return False\n\n def iterate_and_stop_once(self, route_to_stop: str):\n if not self.__iteration_updated:\n self.update_ctx({f\"{self._id}_iteration\": self.ctx.get(f\"{self._id}_iteration\", 0) + 1})\n self.__iteration_updated = True\n if self.ctx.get(f\"{self._id}_iteration\", 0) >= self.max_iterations and route_to_stop == self.default_route:\n route_to_stop = \"true_result\" if route_to_stop == \"false_result\" else \"false_result\"\n self.stop(route_to_stop)\n\n def true_response(self) -> Message:\n result = self.evaluate_condition(\n self.input_text, self.match_text, self.operator, case_sensitive=self.case_sensitive\n )\n if result:\n 
self.status = self.message\n self.iterate_and_stop_once(\"false_result\")\n return self.message\n self.iterate_and_stop_once(\"true_result\")\n return Message(content=\"\")\n\n def false_response(self) -> Message:\n result = self.evaluate_condition(\n self.input_text, self.match_text, self.operator, case_sensitive=self.case_sensitive\n )\n if not result:\n self.status = self.message\n self.iterate_and_stop_once(\"true_result\")\n return self.message\n self.iterate_and_stop_once(\"false_result\")\n return Message(content=\"\")\n\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n if field_name == \"operator\":\n if field_value == \"regex\":\n build_config.pop(\"case_sensitive\", None)\n\n # Ensure case_sensitive is present for all other operators\n elif \"case_sensitive\" not in build_config:\n case_sensitive_input = next(\n (input_field for input_field in self.inputs if input_field.name == \"case_sensitive\"), None\n )\n if case_sensitive_input:\n build_config[\"case_sensitive\"] = case_sensitive_input.to_dict()\n return build_config\n"
+ "value": "import re\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.io import BoolInput, DropdownInput, IntInput, MessageInput, MessageTextInput, Output\nfrom langflow.schema.message import Message\n\n\nclass ConditionalRouterComponent(Component):\n display_name = \"If-Else\"\n description = \"Routes an input message to a corresponding output based on text comparison.\"\n icon = \"split\"\n name = \"ConditionalRouter\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.__iteration_updated = False\n\n inputs = [\n MessageTextInput(\n name=\"input_text\",\n display_name=\"Text Input\",\n info=\"The primary text input for the operation.\",\n required=True,\n ),\n MessageTextInput(\n name=\"match_text\",\n display_name=\"Match Text\",\n info=\"The text input to compare against.\",\n required=True,\n ),\n DropdownInput(\n name=\"operator\",\n display_name=\"Operator\",\n options=[\"equals\", \"not equals\", \"contains\", \"starts with\", \"ends with\", \"regex\"],\n info=\"The operator to apply for comparing the texts.\",\n value=\"equals\",\n real_time_refresh=True,\n ),\n BoolInput(\n name=\"case_sensitive\",\n display_name=\"Case Sensitive\",\n info=\"If true, the comparison will be case sensitive.\",\n value=True,\n advanced=True,\n ),\n MessageInput(\n name=\"message\",\n display_name=\"Alternative Output\",\n info=\"The message to pass through either route.\",\n advanced=True,\n ),\n IntInput(\n name=\"max_iterations\",\n display_name=\"Max Iterations\",\n info=\"The maximum number of iterations for the conditional router.\",\n value=10,\n advanced=True,\n ),\n DropdownInput(\n name=\"default_route\",\n display_name=\"Default Route\",\n options=[\"true_result\", \"false_result\"],\n info=\"The default route to take when max iterations are reached.\",\n value=\"false_result\",\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"True\", name=\"true_result\", method=\"true_response\", group_outputs=True),\n Output(display_name=\"False\", name=\"false_result\", method=\"false_response\", group_outputs=True),\n ]\n\n def _pre_run_setup(self):\n self.__iteration_updated = False\n\n def evaluate_condition(self, input_text: str, match_text: str, operator: str, *, case_sensitive: bool) -> bool:\n if not case_sensitive and operator != \"regex\":\n input_text = input_text.lower()\n match_text = match_text.lower()\n\n if operator == \"equals\":\n return input_text == match_text\n if operator == \"not equals\":\n return input_text != match_text\n if operator == \"contains\":\n return match_text in input_text\n if operator == \"starts with\":\n return input_text.startswith(match_text)\n if operator == \"ends with\":\n return input_text.endswith(match_text)\n if operator == \"regex\":\n try:\n return bool(re.match(match_text, input_text))\n except re.error:\n return False # Return False if the regex is invalid\n return False\n\n def iterate_and_stop_once(self, route_to_stop: str):\n if not self.__iteration_updated:\n self.update_ctx({f\"{self._id}_iteration\": self.ctx.get(f\"{self._id}_iteration\", 0) + 1})\n self.__iteration_updated = True\n if self.ctx.get(f\"{self._id}_iteration\", 0) >= self.max_iterations and route_to_stop == self.default_route:\n route_to_stop = \"true_result\" if route_to_stop == \"false_result\" else \"false_result\"\n self.stop(route_to_stop)\n\n def true_response(self) -> Message:\n result = self.evaluate_condition(\n self.input_text, self.match_text, self.operator, 
case_sensitive=self.case_sensitive\n )\n if result:\n self.status = self.message\n self.iterate_and_stop_once(\"false_result\")\n return self.message\n self.iterate_and_stop_once(\"true_result\")\n return Message(content=\"\")\n\n def false_response(self) -> Message:\n result = self.evaluate_condition(\n self.input_text, self.match_text, self.operator, case_sensitive=self.case_sensitive\n )\n if not result:\n self.status = self.message\n self.iterate_and_stop_once(\"true_result\")\n return self.message\n self.iterate_and_stop_once(\"false_result\")\n return Message(content=\"\")\n\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n if field_name == \"operator\":\n if field_value == \"regex\":\n build_config.pop(\"case_sensitive\", None)\n\n # Ensure case_sensitive is present for all other operators\n elif \"case_sensitive\" not in build_config:\n case_sensitive_input = next(\n (input_field for input_field in self.inputs if input_field.name == \"case_sensitive\"), None\n )\n if case_sensitive_input:\n build_config[\"case_sensitive\"] = case_sensitive_input.to_dict()\n return build_config\n"
},
"default_route": {
"_input_type": "DropdownInput",
diff --git a/src/backend/base/langflow/interface/initialize/loading.py b/src/backend/base/langflow/interface/initialize/loading.py
index daf44c663..f20629c0b 100644
--- a/src/backend/base/langflow/interface/initialize/loading.py
+++ b/src/backend/base/langflow/interface/initialize/loading.py
@@ -10,12 +10,13 @@ from loguru import logger
from pydantic import PydanticDeprecatedSince20
from langflow.custom.eval import eval_custom_component_code
-from langflow.schema import Data
from langflow.schema.artifact import get_artifact_type, post_process_raw
+from langflow.schema.data import Data
from langflow.services.deps import get_tracing_service
if TYPE_CHECKING:
- from langflow.custom import Component, CustomComponent
+ from langflow.custom.custom_component.component import Component
+ from langflow.custom.custom_component.custom_component import CustomComponent
from langflow.events.event_manager import EventManager
from langflow.graph.vertex.base import Vertex
diff --git a/src/backend/base/langflow/load/load.py b/src/backend/base/langflow/load/load.py
index 4dbf8e415..502195e12 100644
--- a/src/backend/base/langflow/load/load.py
+++ b/src/backend/base/langflow/load/load.py
@@ -6,7 +6,7 @@ from aiofile import async_open
from dotenv import dotenv_values
from loguru import logger
-from langflow.graph import Graph
+from langflow.graph.graph.base import Graph
from langflow.graph.schema import RunOutputs
from langflow.load.utils import replace_tweaks_with_env
from langflow.logging.logger import configure
diff --git a/src/backend/base/langflow/services/database/models/api_key/crud.py b/src/backend/base/langflow/services/database/models/api_key/crud.py
index 31dcae568..d56fbf1c0 100644
--- a/src/backend/base/langflow/services/database/models/api_key/crud.py
+++ b/src/backend/base/langflow/services/database/models/api_key/crud.py
@@ -7,8 +7,8 @@ from sqlalchemy.orm import selectinload
from sqlmodel import select
from sqlmodel.ext.asyncio.session import AsyncSession
-from langflow.services.database.models import User
-from langflow.services.database.models.api_key import ApiKey, ApiKeyCreate, ApiKeyRead, UnmaskedApiKeyRead
+from langflow.services.database.models.api_key.model import ApiKey, ApiKeyCreate, ApiKeyRead, UnmaskedApiKeyRead
+from langflow.services.database.models.user.model import User
from langflow.services.deps import get_settings_service, session_scope
if TYPE_CHECKING:
diff --git a/src/backend/base/langflow/services/database/models/api_key/model.py b/src/backend/base/langflow/services/database/models/api_key/model.py
index 473187b18..f71013140 100644
--- a/src/backend/base/langflow/services/database/models/api_key/model.py
+++ b/src/backend/base/langflow/services/database/models/api_key/model.py
@@ -8,7 +8,7 @@ from sqlmodel import Column, DateTime, Field, Relationship, SQLModel, func
from langflow.schema.serialize import UUIDstr
if TYPE_CHECKING:
- from langflow.services.database.models.user import User
+ from langflow.services.database.models.user.model import User
def utc_now():
diff --git a/src/backend/base/langflow/services/database/models/flow/model.py b/src/backend/base/langflow/services/database/models/flow/model.py
index 767dff564..458c58c91 100644
--- a/src/backend/base/langflow/services/database/models/flow/model.py
+++ b/src/backend/base/langflow/services/database/models/flow/model.py
@@ -20,11 +20,11 @@ from sqlalchemy import Enum as SQLEnum
from sqlalchemy import Text, UniqueConstraint, text
from sqlmodel import JSON, Column, Field, Relationship, SQLModel
-from langflow.schema import Data
+from langflow.schema.data import Data
if TYPE_CHECKING:
- from langflow.services.database.models.folder import Folder
- from langflow.services.database.models.user import User
+ from langflow.services.database.models.folder.model import Folder
+ from langflow.services.database.models.user.model import User
HEX_COLOR_LENGTH = 7
diff --git a/src/backend/base/langflow/services/database/models/user/model.py b/src/backend/base/langflow/services/database/models/user/model.py
index 7bd4606d2..e39fe3e68 100644
--- a/src/backend/base/langflow/services/database/models/user/model.py
+++ b/src/backend/base/langflow/services/database/models/user/model.py
@@ -9,10 +9,10 @@ from sqlmodel import Field, Relationship, SQLModel
from langflow.schema.serialize import UUIDstr
if TYPE_CHECKING:
- from langflow.services.database.models.api_key import ApiKey
- from langflow.services.database.models.flow import Flow
- from langflow.services.database.models.folder import Folder
- from langflow.services.database.models.variable import Variable
+ from langflow.services.database.models.api_key.model import ApiKey
+ from langflow.services.database.models.flow.model import Flow
+ from langflow.services.database.models.folder.model import Folder
+ from langflow.services.database.models.variable.model import Variable
class UserOptin(BaseModel):
diff --git a/src/backend/base/langflow/services/shared_component_cache/service.py b/src/backend/base/langflow/services/shared_component_cache/service.py
index 3f401cac3..40f76c03d 100644
--- a/src/backend/base/langflow/services/shared_component_cache/service.py
+++ b/src/backend/base/langflow/services/shared_component_cache/service.py
@@ -1,4 +1,4 @@
-from langflow.services.cache import ThreadingInMemoryCache
+from langflow.services.cache.service import ThreadingInMemoryCache
class SharedComponentCacheService(ThreadingInMemoryCache):
diff --git a/src/backend/base/langflow/utils/data_structure.py b/src/backend/base/langflow/utils/data_structure.py
index 5ba5023b9..9ce03b298 100644
--- a/src/backend/base/langflow/utils/data_structure.py
+++ b/src/backend/base/langflow/utils/data_structure.py
@@ -2,7 +2,7 @@ import json
from collections import Counter
from typing import Any
-from langflow.schema import Data
+from langflow.schema.data import Data
def infer_list_type(items: list, max_samples: int = 5) -> str:
diff --git a/src/backend/base/langflow/utils/util.py b/src/backend/base/langflow/utils/util.py
index 0ead64811..5179d66b2 100644
--- a/src/backend/base/langflow/utils/util.py
+++ b/src/backend/base/langflow/utils/util.py
@@ -10,7 +10,7 @@ from typing import Any
from docstring_parser import parse
from langflow.logging.logger import logger
-from langflow.schema import Data
+from langflow.schema.data import Data
from langflow.services.deps import get_settings_service
from langflow.services.utils import initialize_settings_service
from langflow.template.frontend_node.constants import FORCE_SHOW_FIELDS
diff --git a/src/backend/tests/unit/base/tools/test_component_toolkit.py b/src/backend/tests/unit/base/tools/test_component_toolkit.py
index 469a2ed0f..21fcb6de7 100644
--- a/src/backend/tests/unit/base/tools/test_component_toolkit.py
+++ b/src/backend/tests/unit/base/tools/test_component_toolkit.py
@@ -6,7 +6,7 @@ from langflow.components.input_output.chat_output import ChatOutput
from langflow.components.langchain_utilities import ToolCallingAgentComponent
from langflow.components.languagemodels import OpenAIModelComponent
from langflow.components.tools.calculator import CalculatorToolComponent
-from langflow.graph import Graph
+from langflow.graph.graph.base import Graph
from langflow.schema.data import Data
from pydantic import BaseModel
diff --git a/src/backend/tests/unit/components/languagemodels/test_deepseek.py b/src/backend/tests/unit/components/languagemodels/test_deepseek.py
index e0a9d91bc..4a8c400ba 100644
--- a/src/backend/tests/unit/components/languagemodels/test_deepseek.py
+++ b/src/backend/tests/unit/components/languagemodels/test_deepseek.py
@@ -2,7 +2,7 @@ from unittest.mock import MagicMock
import pytest
from langflow.components.languagemodels import DeepSeekModelComponent
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.custom.utils import build_custom_component_template
diff --git a/src/backend/tests/unit/components/languagemodels/test_xai.py b/src/backend/tests/unit/components/languagemodels/test_xai.py
index 720b1c178..674c7b4da 100644
--- a/src/backend/tests/unit/components/languagemodels/test_xai.py
+++ b/src/backend/tests/unit/components/languagemodels/test_xai.py
@@ -2,9 +2,9 @@ from unittest.mock import MagicMock, patch
import pytest
from langflow.components.languagemodels import XAIModelComponent
-from langflow.custom import Component
+from langflow.custom.custom_component.component import Component
from langflow.custom.utils import build_custom_component_template
-from langflow.inputs import (
+from langflow.inputs.inputs import (
BoolInput,
DictInput,
DropdownInput,
diff --git a/src/backend/tests/unit/components/models/test_language_model_component.py b/src/backend/tests/unit/components/models/test_language_model_component.py
index f03be6b15..52f090f89 100644
--- a/src/backend/tests/unit/components/models/test_language_model_component.py
+++ b/src/backend/tests/unit/components/models/test_language_model_component.py
@@ -3,7 +3,7 @@ from unittest.mock import MagicMock, patch
import pytest
from langflow.base.models.anthropic_constants import ANTHROPIC_MODELS
from langflow.base.models.openai_constants import OPENAI_MODEL_NAMES
-from langflow.components.models import LanguageModelComponent
+from langflow.components.models.language_model import LanguageModelComponent
from tests.base import ComponentTestBaseWithClient
diff --git a/src/backend/tests/unit/graph/edge/test_edge_base.py b/src/backend/tests/unit/graph/edge/test_edge_base.py
index 2f53aed10..0cd7ad436 100644
--- a/src/backend/tests/unit/graph/edge/test_edge_base.py
+++ b/src/backend/tests/unit/graph/edge/test_edge_base.py
@@ -4,7 +4,7 @@ import pytest
from langflow.components.input_output import ChatInput, ChatOutput
from langflow.components.languagemodels import OpenAIModelComponent
from langflow.components.prompts import PromptComponent
-from langflow.graph import Graph
+from langflow.graph.graph.base import Graph
def test_edge_raises_error_on_invalid_target_handle():
diff --git a/src/backend/tests/unit/graph/graph/test_cycles.py b/src/backend/tests/unit/graph/graph/test_cycles.py
index ca5d807f5..0400cdace 100644
--- a/src/backend/tests/unit/graph/graph/test_cycles.py
+++ b/src/backend/tests/unit/graph/graph/test_cycles.py
@@ -6,8 +6,8 @@ from langflow.components.input_output.text import TextInputComponent
from langflow.components.languagemodels import OpenAIModelComponent
from langflow.components.logic.conditional_router import ConditionalRouterComponent
from langflow.components.prompts import PromptComponent
-from langflow.custom import Component
-from langflow.graph import Graph
+from langflow.custom.custom_component.component import Component
+from langflow.graph.graph.base import Graph
from langflow.graph.graph.utils import find_cycle_vertices
from langflow.io import MessageTextInput, Output
from langflow.schema.message import Message
diff --git a/src/backend/tests/unit/graph/graph/test_graph_state_model.py b/src/backend/tests/unit/graph/graph/test_graph_state_model.py
index adddfe447..07d6cd112 100644
--- a/src/backend/tests/unit/graph/graph/test_graph_state_model.py
+++ b/src/backend/tests/unit/graph/graph/test_graph_state_model.py
@@ -6,7 +6,7 @@ from langflow.components.input_output import ChatInput, ChatOutput
from langflow.components.languagemodels import OpenAIModelComponent
from langflow.components.processing.converter import TypeConverterComponent
from langflow.components.prompts import PromptComponent
-from langflow.graph import Graph
+from langflow.graph.graph.base import Graph
from langflow.graph.graph.constants import Finish
from langflow.graph.graph.state_model import create_state_model_from_graph
diff --git a/src/backend/tests/unit/initial_setup/starter_projects/test_memory_chatbot.py b/src/backend/tests/unit/initial_setup/starter_projects/test_memory_chatbot.py
index 343c2ab36..fed2d21b7 100644
--- a/src/backend/tests/unit/initial_setup/starter_projects/test_memory_chatbot.py
+++ b/src/backend/tests/unit/initial_setup/starter_projects/test_memory_chatbot.py
@@ -8,7 +8,7 @@ from langflow.components.input_output import ChatInput, ChatOutput
from langflow.components.languagemodels import OpenAIModelComponent
from langflow.components.processing.converter import TypeConverterComponent
from langflow.components.prompts import PromptComponent
-from langflow.graph import Graph
+from langflow.graph.graph.base import Graph
from langflow.graph.graph.constants import Finish
if TYPE_CHECKING:
diff --git a/src/backend/tests/unit/initial_setup/starter_projects/test_vector_store_rag.py b/src/backend/tests/unit/initial_setup/starter_projects/test_vector_store_rag.py
index 54193c0f0..9c16f8899 100644
--- a/src/backend/tests/unit/initial_setup/starter_projects/test_vector_store_rag.py
+++ b/src/backend/tests/unit/initial_setup/starter_projects/test_vector_store_rag.py
@@ -11,7 +11,7 @@ from langflow.components.processing import ParseDataComponent
from langflow.components.processing.split_text import SplitTextComponent
from langflow.components.prompts import PromptComponent
from langflow.components.vectorstores import AstraDBVectorStoreComponent
-from langflow.graph import Graph
+from langflow.graph.graph.base import Graph
from langflow.graph.graph.constants import Finish
from langflow.schema import Data
from langflow.schema.dataframe import DataFrame
diff --git a/uv.lock b/uv.lock
index a583a0216..ca9087366 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1232,7 +1232,7 @@ wheels = [
[[package]]
name = "codeflash"
-version = "0.13.0"
+version = "0.13.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "click" },
@@ -1261,7 +1261,7 @@ dependencies = [
{ name = "unidiff" },
{ name = "unittest-xml-reporting" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/48/6a/c701cc1a269079600dbbe95cb81a38729db8d078d011f246435bf2dad6c7/codeflash-0.13.0.tar.gz", hash = "sha256:7f26e9d24d3865a061244a849f38e7a565ddf795747553ad4f2ad6d98f59b43e", size = 158156, upload-time = "2025-06-04T00:13:17.936Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/03/e9/cfa5423636d75ed4d9b55609403b2d97a4c1619e37d446d67e14b1ff0234/codeflash-0.13.1.tar.gz", hash = "sha256:fbd31f95904f2110560eb621ba65eb5658ac659cbb67d674560dd5f859178d0e", size = 158209, upload-time = "2025-06-04T20:51:45.515Z" }
[[package]]
name = "cohere"
@@ -2565,7 +2565,7 @@ wheels = [
[[package]]
name = "gevent"
-version = "24.11.1"
+version = "25.5.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "cffi", marker = "platform_python_implementation == 'CPython' and sys_platform == 'win32'" },
@@ -2573,41 +2573,41 @@ dependencies = [
{ name = "zope-event" },
{ name = "zope-interface" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/ab/75/a53f1cb732420f5e5d79b2563fc3504d22115e7ecfe7966e5cf9b3582ae7/gevent-24.11.1.tar.gz", hash = "sha256:8bd1419114e9e4a3ed33a5bad766afff9a3cf765cb440a582a1b3a9bc80c1aca", size = 5976624, upload-time = "2024-11-11T15:36:45.991Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/f1/58/267e8160aea00ab00acd2de97197eecfe307064a376fb5c892870a8a6159/gevent-25.5.1.tar.gz", hash = "sha256:582c948fa9a23188b890d0bc130734a506d039a2e5ad87dae276a456cc683e61", size = 6388207, upload-time = "2025-05-12T12:57:59.833Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/36/7d/27ed3603f4bf96b36fb2746e923e033bc600c6684de8fe164d64eb8c4dcc/gevent-24.11.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:92fe5dfee4e671c74ffaa431fd7ffd0ebb4b339363d24d0d944de532409b935e", size = 2998254, upload-time = "2024-11-11T14:33:11.56Z" },
- { url = "https://files.pythonhosted.org/packages/a8/03/a8f6c70f50a644a79e75d9f15e6f1813115d34c3c55528e4669a9316534d/gevent-24.11.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b7bfcfe08d038e1fa6de458891bca65c1ada6d145474274285822896a858c870", size = 4817711, upload-time = "2024-11-11T15:20:01.335Z" },
- { url = "https://files.pythonhosted.org/packages/f0/05/4f9bc565520a18f107464d40ac15a91708431362c797e77fbb5e7ff26e64/gevent-24.11.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7398c629d43b1b6fd785db8ebd46c0a353880a6fab03d1cf9b6788e7240ee32e", size = 4934468, upload-time = "2024-11-11T15:20:57.845Z" },
- { url = "https://files.pythonhosted.org/packages/4a/7d/f15561eeebecbebc0296dd7bebea10ac4af0065d98249e3d8c4998e68edd/gevent-24.11.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d7886b63ebfb865178ab28784accd32f287d5349b3ed71094c86e4d3ca738af5", size = 5014067, upload-time = "2024-11-11T15:22:42.373Z" },
- { url = "https://files.pythonhosted.org/packages/67/c1/07eff117a600fc3c9bd4e3a1ff3b726f146ee23ce55981156547ccae0c85/gevent-24.11.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9ca80711e6553880974898d99357fb649e062f9058418a92120ca06c18c3c59", size = 6625531, upload-time = "2024-11-11T14:57:07.419Z" },
- { url = "https://files.pythonhosted.org/packages/4b/72/43f76ab6b18e5e56b1003c844829971f3044af08b39b3c9040559be00a2b/gevent-24.11.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e24181d172f50097ac8fc272c8c5b030149b630df02d1c639ee9f878a470ba2b", size = 5249671, upload-time = "2024-11-11T15:37:02.773Z" },
- { url = "https://files.pythonhosted.org/packages/6b/fc/1a847ada0757cc7690f83959227514b1a52ff6de504619501c81805fa1da/gevent-24.11.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1d4fadc319b13ef0a3c44d2792f7918cf1bca27cacd4d41431c22e6b46668026", size = 6773903, upload-time = "2024-11-11T15:03:42.395Z" },
- { url = "https://files.pythonhosted.org/packages/3b/9d/254dcf455f6659ab7e36bec0bc11f51b18ea25eac2de69185e858ccf3c30/gevent-24.11.1-cp310-cp310-win_amd64.whl", hash = "sha256:3d882faa24f347f761f934786dde6c73aa6c9187ee710189f12dcc3a63ed4a50", size = 1560443, upload-time = "2024-11-11T15:10:20.185Z" },
- { url = "https://files.pythonhosted.org/packages/ea/fd/86a170f77ef51a15297573c50dbec4cc67ddc98b677cc2d03cc7f2927f4c/gevent-24.11.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:351d1c0e4ef2b618ace74c91b9b28b3eaa0dd45141878a964e03c7873af09f62", size = 2951424, upload-time = "2024-11-11T14:32:36.451Z" },
- { url = "https://files.pythonhosted.org/packages/7f/0a/987268c9d446f61883bc627c77c5ed4a97869c0f541f76661a62b2c411f6/gevent-24.11.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5efe72e99b7243e222ba0c2c2ce9618d7d36644c166d63373af239da1036bab", size = 4878504, upload-time = "2024-11-11T15:20:03.521Z" },
- { url = "https://files.pythonhosted.org/packages/dc/d4/2f77ddd837c0e21b4a4460bcb79318b6754d95ef138b7a29f3221c7e9993/gevent-24.11.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9d3b249e4e1f40c598ab8393fc01ae6a3b4d51fc1adae56d9ba5b315f6b2d758", size = 5007668, upload-time = "2024-11-11T15:21:00.422Z" },
- { url = "https://files.pythonhosted.org/packages/80/a0/829e0399a1f9b84c344b72d2be9aa60fe2a64e993cac221edcc14f069679/gevent-24.11.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81d918e952954675f93fb39001da02113ec4d5f4921bf5a0cc29719af6824e5d", size = 5067055, upload-time = "2024-11-11T15:22:44.279Z" },
- { url = "https://files.pythonhosted.org/packages/1e/67/0e693f9ddb7909c2414f8fcfc2409aa4157884c147bc83dab979e9cf717c/gevent-24.11.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9c935b83d40c748b6421625465b7308d87c7b3717275acd587eef2bd1c39546", size = 6761883, upload-time = "2024-11-11T14:57:09.359Z" },
- { url = "https://files.pythonhosted.org/packages/fa/b6/b69883fc069d7148dd23c5dda20826044e54e7197f3c8e72b8cc2cd4035a/gevent-24.11.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff96c5739834c9a594db0e12bf59cb3fa0e5102fc7b893972118a3166733d61c", size = 5440802, upload-time = "2024-11-11T15:37:04.983Z" },
- { url = "https://files.pythonhosted.org/packages/32/4e/b00094d995ff01fd88b3cf6b9d1d794f935c31c645c431e65cd82d808c9c/gevent-24.11.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d6c0a065e31ef04658f799215dddae8752d636de2bed61365c358f9c91e7af61", size = 6866992, upload-time = "2024-11-11T15:03:44.208Z" },
- { url = "https://files.pythonhosted.org/packages/37/ed/58dbe9fb09d36f6477ff8db0459ebd3be9a77dc05ae5d96dc91ad657610d/gevent-24.11.1-cp311-cp311-win_amd64.whl", hash = "sha256:97e2f3999a5c0656f42065d02939d64fffaf55861f7d62b0107a08f52c984897", size = 1543736, upload-time = "2024-11-11T15:03:06.121Z" },
- { url = "https://files.pythonhosted.org/packages/dd/32/301676f67ffa996ff1c4175092fb0c48c83271cc95e5c67650b87156b6cf/gevent-24.11.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:a3d75fa387b69c751a3d7c5c3ce7092a171555126e136c1d21ecd8b50c7a6e46", size = 2956467, upload-time = "2024-11-11T14:32:33.238Z" },
- { url = "https://files.pythonhosted.org/packages/6b/84/aef1a598123cef2375b6e2bf9d17606b961040f8a10e3dcc3c3dd2a99f05/gevent-24.11.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:beede1d1cff0c6fafae3ab58a0c470d7526196ef4cd6cc18e7769f207f2ea4eb", size = 5136486, upload-time = "2024-11-11T15:20:04.972Z" },
- { url = "https://files.pythonhosted.org/packages/92/7b/04f61187ee1df7a913b3fca63b0a1206c29141ab4d2a57e7645237b6feb5/gevent-24.11.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:85329d556aaedced90a993226d7d1186a539c843100d393f2349b28c55131c85", size = 5299718, upload-time = "2024-11-11T15:21:03.354Z" },
- { url = "https://files.pythonhosted.org/packages/36/2a/ebd12183ac25eece91d084be2111e582b061f4d15ead32239b43ed47e9ba/gevent-24.11.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:816b3883fa6842c1cf9d2786722014a0fd31b6312cca1f749890b9803000bad6", size = 5400118, upload-time = "2024-11-11T15:22:45.897Z" },
- { url = "https://files.pythonhosted.org/packages/ec/c9/f006c0cd59f0720fbb62ee11da0ad4c4c0fd12799afd957dd491137e80d9/gevent-24.11.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b24d800328c39456534e3bc3e1684a28747729082684634789c2f5a8febe7671", size = 6775163, upload-time = "2024-11-11T14:57:11.991Z" },
- { url = "https://files.pythonhosted.org/packages/49/f1/5edf00b674b10d67e3b967c2d46b8a124c2bc8cfd59d4722704392206444/gevent-24.11.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a5f1701ce0f7832f333dd2faf624484cbac99e60656bfbb72504decd42970f0f", size = 5479886, upload-time = "2024-11-11T15:37:06.558Z" },
- { url = "https://files.pythonhosted.org/packages/22/11/c48e62744a32c0d48984268ae62b99edb81eaf0e03b42de52e2f09855509/gevent-24.11.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:d740206e69dfdfdcd34510c20adcb9777ce2cc18973b3441ab9767cd8948ca8a", size = 6891452, upload-time = "2024-11-11T15:03:46.892Z" },
- { url = "https://files.pythonhosted.org/packages/11/b2/5d20664ef6a077bec9f27f7a7ee761edc64946d0b1e293726a3d074a9a18/gevent-24.11.1-cp312-cp312-win_amd64.whl", hash = "sha256:68bee86b6e1c041a187347ef84cf03a792f0b6c7238378bf6ba4118af11feaae", size = 1541631, upload-time = "2024-11-11T14:55:34.977Z" },
- { url = "https://files.pythonhosted.org/packages/a4/8f/4958e70caeaf469c576ecc5b5f2cb49ddaad74336fa82363d89cddb3c284/gevent-24.11.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:d618e118fdb7af1d6c1a96597a5cd6ac84a9f3732b5be8515c6a66e098d498b6", size = 2949601, upload-time = "2024-11-11T14:32:35.002Z" },
- { url = "https://files.pythonhosted.org/packages/3b/64/79892d250b7b2aa810688dfebe783aec02568e5cecacb1e100acbb9d95c6/gevent-24.11.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2142704c2adce9cd92f6600f371afb2860a446bfd0be5bd86cca5b3e12130766", size = 5107052, upload-time = "2024-11-11T15:20:07.219Z" },
- { url = "https://files.pythonhosted.org/packages/66/44/9ee0ed1909b4f41375e32bf10036d5d8624962afcbd901573afdecd2e36a/gevent-24.11.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:92e0d7759de2450a501effd99374256b26359e801b2d8bf3eedd3751973e87f5", size = 5271736, upload-time = "2024-11-11T15:21:05.953Z" },
- { url = "https://files.pythonhosted.org/packages/e3/48/0184b2622a388a256199c5fadcad6b52b6455019c2a4b19edd6de58e30ba/gevent-24.11.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca845138965c8c56d1550499d6b923eb1a2331acfa9e13b817ad8305dde83d11", size = 5367782, upload-time = "2024-11-11T15:22:48.15Z" },
- { url = "https://files.pythonhosted.org/packages/9a/b1/1a2704c346234d889d2e0042efb182534f7d294115f0e9f99d8079fa17eb/gevent-24.11.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:356b73d52a227d3313f8f828025b665deada57a43d02b1cf54e5d39028dbcf8d", size = 6757533, upload-time = "2024-11-11T14:57:15.142Z" },
- { url = "https://files.pythonhosted.org/packages/ed/6e/b2eed8dec617264f0046d50a13a42d3f0a06c50071b9fc1eae00285a03f1/gevent-24.11.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:58851f23c4bdb70390f10fc020c973ffcf409eb1664086792c8b1e20f25eef43", size = 5449436, upload-time = "2024-11-11T15:37:08.143Z" },
- { url = "https://files.pythonhosted.org/packages/63/c2/eca6b95fbf9af287fa91c327494e4b74a8d5bfa0156cd87b233f63f118dc/gevent-24.11.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:1ea50009ecb7f1327347c37e9eb6561bdbc7de290769ee1404107b9a9cba7cf1", size = 6866470, upload-time = "2024-11-11T15:03:48.724Z" },
- { url = "https://files.pythonhosted.org/packages/b7/e6/51824bd1f2c1ce70aa01495aa6ffe04ab789fa819fa7e6f0ad2388fb03c6/gevent-24.11.1-cp313-cp313-win_amd64.whl", hash = "sha256:ec68e270543ecd532c4c1d70fca020f90aa5486ad49c4f3b8b2e64a66f5c9274", size = 1540088, upload-time = "2024-11-11T14:46:23.849Z" },
- { url = "https://files.pythonhosted.org/packages/86/63/197aa67250943b508b34995c2aa6b46402e7e6f11785487740c2057bfb20/gevent-24.11.1-pp310-pypy310_pp73-macosx_11_0_universal2.whl", hash = "sha256:f43f47e702d0c8e1b8b997c00f1601486f9f976f84ab704f8f11536e3fa144c9", size = 1271676, upload-time = "2024-11-11T14:32:44.515Z" },
+ { url = "https://files.pythonhosted.org/packages/44/a7/438568c37fb255f80e710318bfcad04731b92ce764bc16adee278fdc6b4d/gevent-25.5.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:8e5a0fab5e245b15ec1005b3666b0a2e867c26f411c8fe66ae1afe07174a30e9", size = 2922800, upload-time = "2025-05-12T11:11:46.728Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/b3/b44d8b1c4a4d01097a7f82ffbc582d054007365c27b28867f0b2d4241d73/gevent-25.5.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7b80a37f2fb45ee4a8f7e64b77dd8a842d364384046e394227b974a4e9c9a52", size = 1812954, upload-time = "2025-05-12T11:52:27.059Z" },
+ { url = "https://files.pythonhosted.org/packages/1e/c6/935b4c973ad827c9ec49c354d68d047da1d23e3018bda63d3723cce43178/gevent-25.5.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29ab729d50ae85077a68e0385f129f5b01052d01a0ae6d7fdc1824f5337905e4", size = 1900169, upload-time = "2025-05-12T11:54:17.797Z" },
+ { url = "https://files.pythonhosted.org/packages/38/8a/b745bddfec35fb723cafb036f191e5e0a0013f1698bf0ba4fa2cb8e01879/gevent-25.5.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80d20592aeabcc4e294fd441fd43d45cb537437fd642c374ea9d964622fad229", size = 1849786, upload-time = "2025-05-12T12:00:01.962Z" },
+ { url = "https://files.pythonhosted.org/packages/7c/b3/7aa7b09d91207bebe7608699558bbadd34f63e32904351867c29f8be25de/gevent-25.5.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8ba0257542ccbb72a8229dc34d00844ccdfba110417e4b7b34599548d0e20e9", size = 2139021, upload-time = "2025-05-12T11:32:58.961Z" },
+ { url = "https://files.pythonhosted.org/packages/74/da/cf52ae0c84361f4164a04f3338508b1234331ce79719db103e50dbc5598c/gevent-25.5.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cad0821dff998c7c60dd238f92cd61380342c47fb9e92e1a8705d9b5ac7c16e8", size = 1830758, upload-time = "2025-05-12T11:59:55.666Z" },
+ { url = "https://files.pythonhosted.org/packages/93/93/73a49b896d78eec27f0895ce3008f9825db748a5aacbca47404d1014da4b/gevent-25.5.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:017a7384c0cd1a5907751c991535a0699596e89725468a7fc39228312e10efa1", size = 2199993, upload-time = "2025-05-12T11:40:50.845Z" },
+ { url = "https://files.pythonhosted.org/packages/df/c7/34680b7d2a75492fa032fa8ecaacc03c1940767a35125f6740954a0132a3/gevent-25.5.1-cp310-cp310-win_amd64.whl", hash = "sha256:469c86d02fccad7e2a3d82fe22237e47ecb376fbf4710bc18747b49c50716817", size = 1652665, upload-time = "2025-05-12T12:35:58.105Z" },
+ { url = "https://files.pythonhosted.org/packages/c6/eb/015e93f16a718e2f836ecebecae9bcd7b4d2a5695d1c8bd5bba2d5d91548/gevent-25.5.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:12380aba5c316e9ff53cc21d8ab80f4a91c0df3ada58f65d4f5eb2cf693db00e", size = 2877441, upload-time = "2025-05-12T11:14:57.735Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/86/42d191a6f6672ca59d6d79b4cd9b89d4a15f59c843fbbad42f2b749f8ea9/gevent-25.5.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f0694daab1a041b69a53f53c2141c12994892b2503870515cabe6a5dbd2a928", size = 1774873, upload-time = "2025-05-12T11:52:29.015Z" },
+ { url = "https://files.pythonhosted.org/packages/f5/9f/42dd255849c9ca2e814f5cbe180980594007ba19044a132cf674069e38bf/gevent-25.5.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2797885e9aeffdc98e1846723e5aa212e7ce53007dbef40d6fd2add264235c41", size = 1857911, upload-time = "2025-05-12T11:54:19.523Z" },
+ { url = "https://files.pythonhosted.org/packages/3e/fc/8e799a733be48f6114bfc531b94e28812741664d8af89872dd90e117f8a4/gevent-25.5.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cde6aaac36b54332e10ea2a5bc0de6a8aba6c205c92603fe4396e3777c88e05d", size = 1812751, upload-time = "2025-05-12T12:00:03.719Z" },
+ { url = "https://files.pythonhosted.org/packages/52/4f/a3f3acd961887da10cb0b49c3d915201973d59ce6bf49e2922eaf2058d5f/gevent-25.5.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24484f80f14befb8822bf29554cfb3a26a26cb69cd1e5a8be9e23b4bd7a96e25", size = 2087115, upload-time = "2025-05-12T11:33:01.128Z" },
+ { url = "https://files.pythonhosted.org/packages/b6/27/bb38e005106a53787c13ad1f9f73ed990e403e462108acae6320ab11d442/gevent-25.5.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8fdc7446895fa184890d8ca5ea61e502691114f9db55c9b76adc33f3086c4368", size = 1793549, upload-time = "2025-05-12T11:59:57.854Z" },
+ { url = "https://files.pythonhosted.org/packages/ee/56/da817bc69e1f0ae8438f12f2cd150656b09a8c3576c6d12f992dc9ca64ef/gevent-25.5.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5b6106e2414b1797133786258fa1962a5e836480e4d5e861577f9fc63b673a5a", size = 2145899, upload-time = "2025-05-12T11:40:53.275Z" },
+ { url = "https://files.pythonhosted.org/packages/b8/42/989403abbdbb1346a1507083c02018bee3fedaef3f9648940c767d8c0958/gevent-25.5.1-cp311-cp311-win_amd64.whl", hash = "sha256:bc899212d90f311784c58938a9c09c59802fb6dc287a35fabdc36d180f57f575", size = 1635771, upload-time = "2025-05-12T12:26:47.644Z" },
+ { url = "https://files.pythonhosted.org/packages/58/c5/cf71423666a0b83db3d7e3f85788bc47d573fca5fe62b798fe2c4273de7c/gevent-25.5.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:d87c0a1bd809d8f70f96b9b229779ec6647339830b8888a192beed33ac8d129f", size = 2909333, upload-time = "2025-05-12T11:11:34.883Z" },
+ { url = "https://files.pythonhosted.org/packages/26/7e/d2f174ee8bec6eb85d961ca203bc599d059c857b8412e367b8fa206603a5/gevent-25.5.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b87a4b66edb3808d4d07bbdb0deed5a710cf3d3c531e082759afd283758bb649", size = 1788420, upload-time = "2025-05-12T11:52:30.306Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/f3/3aba8c147b9108e62ba348c726fe38ae69735a233db425565227336e8ce6/gevent-25.5.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f076779050029a82feb0cb1462021d3404d22f80fa76a181b1a7889cd4d6b519", size = 1868854, upload-time = "2025-05-12T11:54:21.564Z" },
+ { url = "https://files.pythonhosted.org/packages/c6/b1/11a5453f8fcebe90a456471fad48bd154c6a62fcb96e3475a5e408d05fc8/gevent-25.5.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bb673eb291c19370f69295f7a881a536451408481e2e3deec3f41dedb7c281ec", size = 1833946, upload-time = "2025-05-12T12:00:05.514Z" },
+ { url = "https://files.pythonhosted.org/packages/70/1c/37d4a62303f86e6af67660a8df38c1171b7290df61b358e618c6fea79567/gevent-25.5.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1325ed44225c8309c0dd188bdbbbee79e1df8c11ceccac226b861c7d52e4837", size = 2070583, upload-time = "2025-05-12T11:33:02.803Z" },
+ { url = "https://files.pythonhosted.org/packages/4b/8f/3b14929ff28263aba1d268ea97bcf104be1a86ba6f6bb4633838e7a1905e/gevent-25.5.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:fcd5bcad3102bde686d0adcc341fade6245186050ce14386d547ccab4bd54310", size = 1808341, upload-time = "2025-05-12T11:59:59.154Z" },
+ { url = "https://files.pythonhosted.org/packages/2f/fc/674ec819fb8a96e482e4d21f8baa43d34602dba09dfce7bbdc8700899d1b/gevent-25.5.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1a93062609e8fa67ec97cd5fb9206886774b2a09b24887f40148c9c37e6fb71c", size = 2137974, upload-time = "2025-05-12T11:40:54.78Z" },
+ { url = "https://files.pythonhosted.org/packages/05/9a/048b7f5e28c54e4595ad4a8ad3c338fa89560e558db2bbe8273f44f030de/gevent-25.5.1-cp312-cp312-win_amd64.whl", hash = "sha256:2534c23dc32bed62b659ed4fd9e198906179e68b26c9276a897e04163bdde806", size = 1638344, upload-time = "2025-05-12T12:08:31.776Z" },
+ { url = "https://files.pythonhosted.org/packages/10/25/2162b38d7b48e08865db6772d632bd1648136ce2bb50e340565e45607cad/gevent-25.5.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:a022a9de9275ce0b390b7315595454258c525dc8287a03f1a6cacc5878ab7cbc", size = 2928044, upload-time = "2025-05-12T11:11:36.33Z" },
+ { url = "https://files.pythonhosted.org/packages/1b/e0/dbd597a964ed00176da122ea759bf2a6c1504f1e9f08e185379f92dc355f/gevent-25.5.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3fae8533f9d0ef3348a1f503edcfb531ef7a0236b57da1e24339aceb0ce52922", size = 1788751, upload-time = "2025-05-12T11:52:32.643Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/74/960cc4cf4c9c90eafbe0efc238cdf588862e8e278d0b8c0d15a0da4ed480/gevent-25.5.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c7b32d9c3b5294b39ea9060e20c582e49e1ec81edbfeae6cf05f8ad0829cb13d", size = 1869766, upload-time = "2025-05-12T11:54:23.903Z" },
+ { url = "https://files.pythonhosted.org/packages/56/78/fa84b1c7db79b156929685db09a7c18c3127361dca18a09e998e98118506/gevent-25.5.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b95815fe44f318ebbfd733b6428b4cb18cc5e68f1c40e8501dd69cc1f42a83d", size = 1835358, upload-time = "2025-05-12T12:00:06.794Z" },
+ { url = "https://files.pythonhosted.org/packages/00/5c/bfefe3822bbca5b83bfad256c82251b3f5be13d52d14e17a786847b9b625/gevent-25.5.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d316529b70d325b183b2f3f5cde958911ff7be12eb2b532b5c301f915dbbf1e", size = 2073071, upload-time = "2025-05-12T11:33:04.2Z" },
+ { url = "https://files.pythonhosted.org/packages/20/e4/08a77a3839a37db96393dea952e992d5846a881b887986dde62ead6b48a1/gevent-25.5.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f6ba33c13db91ffdbb489a4f3d177a261ea1843923e1d68a5636c53fe98fa5ce", size = 1809805, upload-time = "2025-05-12T12:00:00.537Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/ac/28848348f790c1283df74b0fc0a554271d0606676470f848eccf84eae42a/gevent-25.5.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:37ee34b77c7553777c0b8379915f75934c3f9c8cd32f7cd098ea43c9323c2276", size = 2138305, upload-time = "2025-05-12T11:40:56.566Z" },
+ { url = "https://files.pythonhosted.org/packages/52/9e/0e9e40facd2d714bfb00f71fc6dacaacc82c24c1c2e097bf6461e00dec9f/gevent-25.5.1-cp313-cp313-win_amd64.whl", hash = "sha256:9fa6aa0da224ed807d3b76cdb4ee8b54d4d4d5e018aed2478098e685baae7896", size = 1637444, upload-time = "2025-05-12T12:17:45.995Z" },
+ { url = "https://files.pythonhosted.org/packages/11/81/834da3c1ea5e71e4dc1a78a034a15f2813d9760d135464aae5d1f058a8c6/gevent-25.5.1-pp310-pypy310_pp73-macosx_11_0_universal2.whl", hash = "sha256:60ad4ca9ca2c4cc8201b607c229cd17af749831e371d006d8a91303bb5568eb1", size = 1291540, upload-time = "2025-05-12T11:11:55.456Z" },
]
[[package]]
@@ -3466,16 +3466,16 @@ wheels = [
[[package]]
name = "hypothesis"
-version = "6.135.0"
+version = "6.135.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "attrs" },
{ name = "exceptiongroup", marker = "python_full_version < '3.11'" },
{ name = "sortedcontainers" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/f9/ad/519dc222d624017d4d4890c3cb832892aec372ebdcaaed026c21b23da91f/hypothesis-6.135.0.tar.gz", hash = "sha256:d71b92ab845959aaa5b10bb8638f19349b6ec1222e731da542a25c162aba528a", size = 449272, upload-time = "2025-06-03T23:05:59.76Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/77/d5/f569c4f93c84b7f8ea147da6100050a36940a0ece799887bcd75b554f2ad/hypothesis-6.135.1.tar.gz", hash = "sha256:36eea411ef5dde5612301fcd9a293b6f2a3a5ab96488be2e23e7c5799cbd7b33", size = 449455, upload-time = "2025-06-05T22:08:12.353Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/b5/5a/b67bb690744d957086cab028b2bdcb5c6ad5c1c2428aaab4a6e45709a677/hypothesis-6.135.0-py3-none-any.whl", hash = "sha256:7a1eb93809c628767db115f50a3e95ad96390c0b98260b975660014c346a19e9", size = 514970, upload-time = "2025-06-03T23:05:54.656Z" },
+ { url = "https://files.pythonhosted.org/packages/30/15/4bcd915f4bd747bb258482d8e705ef27a1584b7744e26bec72a9289a6e4f/hypothesis-6.135.1-py3-none-any.whl", hash = "sha256:14fab728bfe2409a3934e6e1ea6ae0a706d0bc78187137218a253aec7528b4c8", size = 515130, upload-time = "2025-06-05T22:08:08.467Z" },
]
[[package]]
@@ -5587,7 +5587,7 @@ wheels = [
[[package]]
name = "locust"
-version = "2.37.7"
+version = "2.37.9"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "configargparse" },
@@ -5607,25 +5607,26 @@ dependencies = [
{ name = "typing-extensions", marker = "python_full_version < '3.11'" },
{ name = "werkzeug" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/7a/53/09abcf585c001deec3d6c2ccf13ca5d2b59645da6c9458fe830189e1bfe3/locust-2.37.7.tar.gz", hash = "sha256:9421ff51ce023a5ddff74f2544a13310f7929ca02452be2663130ffab26585a1", size = 2252484, upload-time = "2025-06-03T08:29:14.142Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/90/05/2bfdf19756c6a12f6f9513f75340ecf0595d83cab4d9fc91162225908e3d/locust-2.37.9.tar.gz", hash = "sha256:e43673b594ec5ecde4f9ba6e0d5c66c00d7c0ae93591951abe83e8d186c67175", size = 2252507, upload-time = "2025-06-05T09:26:58.186Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/5b/21/04567b65343f65840364f56d5e7131360ffcd6793b2c91533a1a9f69668b/locust-2.37.7-py3-none-any.whl", hash = "sha256:925d78f4834d6434ec9a9c4f3984228f762d2c2d95b2804e9b0e9fc856f2d3b5", size = 2269078, upload-time = "2025-06-03T08:29:11.616Z" },
+ { url = "https://files.pythonhosted.org/packages/33/1c/0ece4176231c578e819d970ec08d124492833e50aafd171c582bcc414446/locust-2.37.9-py3-none-any.whl", hash = "sha256:e17da439f3a252d1fb6d4c34daf00d7e8b87e99d833a32e8a79f4f8ebb07767d", size = 2269084, upload-time = "2025-06-05T09:26:56.257Z" },
]
[[package]]
name = "locust-cloud"
-version = "1.23.0"
+version = "1.23.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "configargparse" },
{ name = "gevent" },
{ name = "platformdirs" },
+ { name = "python-engineio" },
{ name = "python-socketio", extra = ["client"] },
{ name = "tomli", marker = "python_full_version < '3.11'" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/69/cb/db2d9dd27317d22d98f7a4a0eebbe4dc6ff24e44c0ae509118370e605c3b/locust_cloud-1.23.0.tar.gz", hash = "sha256:4038a09eda858b483ced20f5cb82caf3f866244c2c7864e0da5c32722b97f532", size = 450778, upload-time = "2025-06-03T08:12:01.169Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/bd/7c/d9cbbd051490aeedfbd6ddda8ad48f77dd848ee490f6ebd166d20db5911e/locust_cloud-1.23.1.tar.gz", hash = "sha256:a09161752b8c9a9205e97cef5223ee3ad967bc2d91c52d61952aaa3da6802a55", size = 450937, upload-time = "2025-06-05T06:07:53.773Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/a9/d8/0f633e39ed3f3237430c6bcb9d18402dea6bef7eaa60a484cd1d4d939e81/locust_cloud-1.23.0-py3-none-any.whl", hash = "sha256:936cc4feb0b0dd89e113f6318a1205f318ed49a9df4e0feca8d40f2d9b353d30", size = 408308, upload-time = "2025-06-03T08:11:58.228Z" },
+ { url = "https://files.pythonhosted.org/packages/7e/01/5af43edee540e38ba0ee0a2e3beb72c50073e0f646bb543a8b34650315e3/locust_cloud-1.23.1-py3-none-any.whl", hash = "sha256:11677895c6ed6d0beef1b425a6f04f10ea2cfcaeaefbf00a97fb3c9134296e54", size = 408323, upload-time = "2025-06-05T06:07:51.947Z" },
]
[[package]]
@@ -10404,11 +10405,11 @@ wheels = [
[[package]]
name = "types-aiofiles"
-version = "24.1.0.20250516"
+version = "24.1.0.20250606"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/b3/13/41faafda1d85fc8afa744ee67a2d788b3ba63e5f1c7303e5d10c2d784d2d/types_aiofiles-24.1.0.20250516.tar.gz", hash = "sha256:7fd2a7f793bbe180b7b22cd4f59300fe61fdc9940b3bbc9899ffe32849b95188", size = 14304, upload-time = "2025-05-16T03:08:29.55Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/64/6e/fac4ffc896cb3faf2ac5d23747b65dd8bae1d9ee23305d1a3b12111c3989/types_aiofiles-24.1.0.20250606.tar.gz", hash = "sha256:48f9e26d2738a21e0b0f19381f713dcdb852a36727da8414b1ada145d40a18fe", size = 14364, upload-time = "2025-06-06T03:09:26.515Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/70/e7/828287ba5d1107db1093a123fe8e481eb5aab55c911f1100f28d0df80d5a/types_aiofiles-24.1.0.20250516-py3-none-any.whl", hash = "sha256:ec265994629146804b656a971c46f393ce860305834b3cacb4b8b6fb7dba7e33", size = 14253, upload-time = "2025-05-16T03:08:28.67Z" },
+ { url = "https://files.pythonhosted.org/packages/71/de/f2fa2ab8a5943898e93d8036941e05bfd1e1f377a675ee52c7c307dccb75/types_aiofiles-24.1.0.20250606-py3-none-any.whl", hash = "sha256:e568c53fb9017c80897a9aa15c74bf43b7ee90e412286ec1e0912b6e79301aee", size = 14276, upload-time = "2025-06-06T03:09:25.662Z" },
]
[[package]]